

Python torch.sort Function Code Examples

This article collects typical usage examples of the torch.sort function in Python. If you are wondering what torch.sort does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The following shows 15 code examples of the sort function, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
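
Before the examples, here is a minimal, self-contained sketch of torch.sort (assuming only that a recent PyTorch is installed): it returns a (values, indices) pair, sorts along the last dimension by default, and accepts dim and descending arguments.

import torch

# torch.sort returns the sorted values together with the indices that map
# each sorted position back to its position in the original tensor.
x = torch.tensor([3.0, 1.0, 2.0])
values, indices = torch.sort(x)               # ascending by default
# values  -> tensor([1., 2., 3.]), indices -> tensor([1, 2, 0])

values_desc, _ = torch.sort(x, descending=True)
# values_desc -> tensor([3., 2., 1.])

# For multi-dimensional tensors, dim selects the axis to sort along.
m = torch.tensor([[3, 1, 2], [6, 5, 4]])
row_sorted, _ = torch.sort(m, dim=1)
# row_sorted -> tensor([[1, 2, 3], [4, 5, 6]])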

Example 1: test_BiLSTM

def test_BiLSTM(x, x_mask):
    # batch_size = 2, seq_length = 6, feature dim = 3 in the commented-out test data below (note: the LSTM here is built with input_size=4)
    lstm = nn.LSTM(input_size=4, hidden_size=2, num_layers=1, bidirectional=True)

    # sequence_output = np.array([[[0.1, 0.2, 0.1],
    #                              [0.4, 0.5, 0.6],
    #                              [0.1, 0.1, 0.1],
    #                              [0.2, 0.2, 0.2],
    #                              [0.3, 0.1, 0.2],
    #                              [0.4, 0.1, 0.3]], [[0, 0.1, 0.1],
    #                                                 [0.2, 0.3, 0.6],
    #                                                 [0.3, 0.2, 0.1],
    #                                                 [0.1, 0.1, 0.2],
    #                                                 [0.2, 0.2, 0.3],
    #                                                 [0.2, 0.3, 0.4]]])
    # x = torch.from_numpy(sequence_output)
    # x = x.type(torch.float32)
    # x_mask = torch.from_numpy(np.array([[1, 1, 1, 1, 0 ,0], [1, 1, 1, 1, 1, 0]]))
    x_lengths = x_mask.eq(1).sum(1)
    _, idx_sort = torch.sort(x_lengths, dim=0, descending=True)  # sort by length, longest first
    _, idx_unsort = torch.sort(idx_sort, dim=0)  # inverse permutation to restore the original order
    x = x.index_select(0, idx_sort)
    x_lengths = x_lengths[idx_sort]
    x_packed = nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True)
    y_packed, _ = lstm(x_packed)
    y_unpacked, _ = nn.utils.rnn.pad_packed_sequence(y_packed, batch_first=True)
    y_unpacked = y_unpacked.index_select(0, idx_unsort)

    if y_unpacked.size(1) != x_mask.size(1):
        padding = torch.zeros(y_unpacked.size(0), x_mask.size(1) - y_unpacked.size(1), y_unpacked.size(2)).type(
            y_unpacked.type())
        y_unpacked = torch.cat((y_unpacked, padding), dim=1)
    #return y_unpacked
    #print(idx_sort, idx_unsort, x_lengths, x_packed, y_packed, y_unpacked)
    return y_unpacked
Author: Accagain2014, Project: ML, Lines: 35, Source: torch_examples.py
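
The idx_sort / idx_unsort pair in this example is a recurring idiom in the snippets below: pack_padded_sequence requires the batch to be sorted by decreasing length, and sorting the sort indices themselves yields the inverse permutation that restores the original order afterwards. A minimal sketch (not taken from the repository above):

import torch

lengths = torch.tensor([4, 6, 5])
_, idx_sort = torch.sort(lengths, descending=True)   # tensor([1, 2, 0])
_, idx_unsort = torch.sort(idx_sort)                 # tensor([2, 0, 1])

# Applying idx_sort and then idx_unsort is the identity permutation, so
# index_select(0, idx_unsort) undoes the length sort after unpacking.
assert torch.equal(idx_sort[idx_unsort], torch.arange(3))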

Example 2: postprocess_sequence

    def postprocess_sequence(self, X):
        """Embed (variable-length) sequences

        Parameters
        ----------
        X : list
            List of input sequences

        Returns
        -------
        fX : numpy array
            Batch of sequence embeddings.
        """

        lengths = torch.tensor([len(x) for x in X])
        sorted_lengths, sort = torch.sort(lengths, descending=True)
        _, unsort = torch.sort(sort)

        sequences = [torch.tensor(X[i],
                                  dtype=torch.float32,
                                  device=self.device) for i in sort]
        padded = pad_sequence(sequences, batch_first=True, padding_value=0)
        packed = pack_padded_sequence(padded, sorted_lengths,
                                      batch_first=True)

        cpu = torch.device('cpu')
        fX = self.model(packed).detach().to(cpu).numpy()
        return fX[unsort]
Author: instinct2k18, Project: pyannote-audio, Lines: 28, Source: extraction.py

Example 3: sort_batch

def sort_batch(seqbatch):
    """Sorts torch tensor of integer indices by decreasing order."""
    # 0 is padding_idx
    omask = (seqbatch != 0)
    olens = omask.sum(0)
    slens, sidxs = torch.sort(olens, descending=True)
    oidxs = torch.sort(sidxs)[1]
    return (oidxs, sidxs, slens.data.tolist(), omask.float())
Author: bardetadrien, Project: nmtpytorch, Lines: 8, Source: data.py

Example 4: _PyramidRoI_Feat

    def _PyramidRoI_Feat(self, feat_maps, rois, im_info):
        '''ROI pooling on pyramid feature maps.'''
        # do roi pooling based on predicted rois
        img_area = im_info[0][0] * im_info[0][1]
        h = rois.data[:, 4] - rois.data[:, 2] + 1
        w = rois.data[:, 3] - rois.data[:, 1] + 1
        roi_level = torch.log(torch.sqrt(h * w) / 224.0) / np.log(2)
        roi_level = torch.floor(roi_level + 4)
        # --------
        # roi_level = torch.log(torch.sqrt(h * w) / 224.0)
        # roi_level = torch.round(roi_level + 4)
        # ------
        roi_level[roi_level < 2] = 2
        roi_level[roi_level > 5] = 5
        # roi_level.fill_(5)
        if cfg.POOLING_MODE == 'crop':
            # pdb.set_trace()
            # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))
            # NOTE: need to add pyramid support
            grid_xy = _affine_grid_gen(rois, feat_maps.size()[2:], self.grid_size)
            grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()
            roi_pool_feat = self.RCNN_roi_crop(feat_maps, Variable(grid_yx).detach())
            if cfg.CROP_RESIZE_WITH_MAX_POOL:
                roi_pool_feat = F.max_pool2d(roi_pool_feat, 2, 2)

        elif cfg.POOLING_MODE == 'align':
            roi_pool_feats = []
            box_to_levels = []
            for i, l in enumerate(range(2, 6)):
                if (roi_level == l).sum() == 0:
                    continue
                idx_l = (roi_level == l).nonzero().squeeze()
                box_to_levels.append(idx_l)
                scale = feat_maps[i].size(2) / im_info[0][0]
                feat = self.RCNN_roi_align(feat_maps[i], rois[idx_l], scale)
                roi_pool_feats.append(feat)
            roi_pool_feat = torch.cat(roi_pool_feats, 0)
            box_to_level = torch.cat(box_to_levels, 0)
            idx_sorted, order = torch.sort(box_to_level)
            roi_pool_feat = roi_pool_feat[order]

        elif cfg.POOLING_MODE == 'pool':
            roi_pool_feats = []
            box_to_levels = []
            for i, l in enumerate(range(2, 6)):
                if (roi_level == l).sum() == 0:
                    continue
                idx_l = (roi_level == l).nonzero().squeeze()
                box_to_levels.append(idx_l)
                scale = feat_maps[i].size(2) / im_info[0][0]
                feat = self.RCNN_roi_pool(feat_maps[i], rois[idx_l], scale)
                roi_pool_feats.append(feat)
            roi_pool_feat = torch.cat(roi_pool_feats, 0)
            box_to_level = torch.cat(box_to_levels, 0)
            idx_sorted, order = torch.sort(box_to_level)
            roi_pool_feat = roi_pool_feat[order]
            
        return roi_pool_feat
Author: UGuess, Project: FaceDetection-DSFD, Lines: 58, Source: fpn.py

Example 5: forward

 def forward(self, inputs, lengths, hidden=None):
     lens, indices = torch.sort(inputs.data.new(lengths).long(), 0, True)
     inputs = inputs[indices] if self.batch_first else inputs[:, indices] 
     outputs, (h, c) = self.rnn(pack(inputs, lens.tolist(), 
         batch_first=self.batch_first), hidden)
     outputs = unpack(outputs, batch_first=self.batch_first)[0]
     _, _indices = torch.sort(indices, 0)
     outputs = outputs[_indices] if self.batch_first else outputs[:, _indices]
     h, c = h[:, _indices, :], c[:, _indices, :]
     return outputs, (h, c)
Author: Hopeflower, Project: decaNLP, Lines: 10, Source: common.py

Example 6: _forward_padded

    def _forward_padded(self, x, x_mask):
        """Slower (significantly), but more precise,
        encoding that handles padding."""
        # Compute sorted sequence lengths
        lengths = x_mask.data.eq(0).long().sum(1).squeeze()
        _, idx_sort = torch.sort(lengths, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)

        lengths = list(lengths[idx_sort])
        idx_sort = Variable(idx_sort)
        idx_unsort = Variable(idx_unsort)

        # Sort x
        x = x.index_select(0, idx_sort)

        # Transpose batch and sequence dims
        x = x.transpose(0, 1)

        # Pack it up
        rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)

        # Encode all layers
        outputs = [rnn_input]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]

            # Apply dropout to input
            if self.dropout_rate > 0:
                dropout_input = F.dropout(rnn_input.data,
                                          p=self.dropout_rate,
                                          training=self.training)
                rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
                                                        rnn_input.batch_sizes)
            outputs.append(self.rnns[i](rnn_input)[0])

        # Unpack everything
        for i, o in enumerate(outputs[1:], 1):
            outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]

        # Concat hidden layers or take final
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]

        # Transpose and unsort
        output = output.transpose(0, 1)
        output = output.index_select(0, idx_unsort)

        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output
Author: ahiroto, Project: ParlAI, Lines: 55, Source: layers.py

Example 7: forward

    def forward(self, words, seq_lens):
        emb = self.embedder(words)
        seq_lens, idx_sort = torch.sort(seq_lens, descending=True)
        _, idx_unsort = torch.sort(idx_sort, descending=False)

        sorted_input = emb.index_select(1, torch.autograd.Variable(idx_sort))
        packed_input = nn.utils.rnn.pack_padded_sequence(sorted_input, seq_lens.tolist())
        packed_output = self.rnn(packed_input)[0]
        sorted_rnn_output = nn.utils.rnn.pad_packed_sequence(packed_output)[0]
        rnn_output = sorted_rnn_output.index_select(1, torch.autograd.Variable(idx_unsort))

        return rnn_output.max(0)[0]
Author: guitarmind, Project: DME, Lines: 12, Source: encoders.py

Example 8: validate

def validate():
    softmaxer = torch.nn.Softmax(dim=1)
    model.eval()
    correct = total = 0
    precisionmat = (1/np.arange(1,21))[::-1].cumsum()[::-1]
    precisionmat = torch.cuda.FloatTensor(precisionmat.copy())
    precision = 0
    crossentropy = 0
    hidden = model.initHidden()
    for batch in iter(val_iter):
        sentences = batch.text # n=32,bs
        if torch.cuda.is_available():
            sentences = sentences.cuda()
        out, hidden = model(sentences, hidden)
        for j in range(sentences.size(0)-1):
            outj = out[j] # bs,|V|
            labelsj = sentences[j+1] # bs
            # cross entropy
            crossentropy += F.cross_entropy(outj,labelsj,size_average=False,ignore_index=padidx)
            # precision
            outj, labelsj = softmaxer(outj).data, labelsj.data
            _, outsort = torch.sort(outj,dim=1,descending=True)
            outsort = outsort[:,:20]
            inds = (outsort-labelsj.unsqueeze(1)==0)
            inds = inds.sum(dim=0).type(torch.cuda.FloatTensor)
            precision += inds.dot(precisionmat)
            # plain ol accuracy
            _, predicted = torch.max(outj, 1)
            total += labelsj.ne(padidx).int().sum()
            correct += (predicted==labelsj).sum()
            # DEBUGGING: see the rest in trigram.py
        hidden = repackage_hidden(hidden)
    return correct/total, precision/total, torch.exp(crossentropy/total).data[0]
Author: anihamde, Project: cs287-s18, Lines: 33, Source: lstm.py
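
The precision block above keeps the 20 highest-scoring predictions per position by sorting the full score matrix and slicing the index matrix. A minimal sketch (independent of the repository above) showing the same selection written with torch.topk:

import torch

scores = torch.tensor([[0.1, 0.9, 0.4, 0.7],
                       [0.3, 0.2, 0.8, 0.5]])

# Full sort, then keep the first two index columns.
_, outsort = torch.sort(scores, dim=1, descending=True)
top2_via_sort = outsort[:, :2]                        # tensor([[1, 3], [2, 3]])

# torch.topk returns the k largest values and their indices directly.
top2_vals, top2_idx = torch.topk(scores, k=2, dim=1)
# top2_idx matches top2_via_sort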

Example 9: eliminate_rows

 def eliminate_rows(self, prob_sc, ind, phis):
     """ eliminate rows of phis and prob_matrix scale """
     length = prob_sc.size()[1]
     mask = (prob_sc[:, :, 0] > 0.85).type(dtype)
     rang = (Variable(torch.range(0, length - 1).unsqueeze(0)
             .expand_as(mask)).
             type(dtype))
     ind_sc = torch.sort(rang * (1-mask) + length * mask, 1)[1]
     # permute prob_sc
     m = mask.unsqueeze(2).expand_as(prob_sc)
     mm = m.clone()
     mm[:, :, 1:] = 0
     prob_sc = (torch.gather(prob_sc * (1 - m) + mm, 1,
                ind_sc.unsqueeze(2).expand_as(prob_sc)))
     # compose permutations
     ind = torch.gather(ind, 1, ind_sc)
     active = torch.gather(1-mask, 1, ind_sc)
     # permute phis
     active1 = active.unsqueeze(2).expand_as(phis)
     ind1 = ind.unsqueeze(2).expand_as(phis)
     active2 = active.unsqueeze(1).expand_as(phis)
     ind2 = ind.unsqueeze(1).expand_as(phis)
     phis_out = torch.gather(phis, 1, ind1) * active1
     phis_out = torch.gather(phis_out, 2, ind2) * active2
     return prob_sc, ind, phis_out, active
Author: ParsonsZeng, Project: DiCoNet, Lines: 25, Source: DCN.py

Example 10: value

    def value(self):
        # case when number of elements added are 0
        if self.scores.shape[0] == 0:
            return 0.5

        # sorting the arrays
        scores, sortind = torch.sort(torch.from_numpy(self.scores), dim=0, descending=True)
        scores = scores.numpy()
        sortind = sortind.numpy()

        # creating the roc curve
        tpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
        fpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)

        for i in range(1, scores.size + 1):
            if self.targets[sortind[i - 1]] == 1:
                tpr[i] = tpr[i - 1] + 1
                fpr[i] = fpr[i - 1]
            else:
                tpr[i] = tpr[i - 1]
                fpr[i] = fpr[i - 1] + 1

        tpr /= (self.targets.sum() * 1.0)
        fpr /= ((self.targets - 1.0).sum() * -1.0)

        # calculating area under curve using trapezoidal rule
        n = tpr.shape[0]
        h = fpr[1:n] - fpr[0:n - 1]
        sum_h = np.zeros(fpr.shape)
        sum_h[0:n - 1] = h
        sum_h[1:n] += h
        area = (sum_h * tpr).sum() / 2.0

        return (area, tpr, fpr)
Author: elanmart, Project: tnt, Lines: 34, Source: aucmeter.py

Example 11: non_max_suppression

def non_max_suppression(prediction, num_classes, conf_thres=0.5, nms_thres=0.4):
    """
    Removes detections with lower object confidence score than 'conf_thres' and performs
    Non-Maximum Suppression to further filter detections.
    Returns detections with shape:
        (x1, y1, x2, y2, object_conf, class_score, class_pred)
    """

    # From (center x, center y, width, height) to (x1, y1, x2, y2)
    box_corner = prediction.new(prediction.shape)
    box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
    box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
    box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
    box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
    prediction[:, :, :4] = box_corner[:, :, :4]

    output = [None for _ in range(len(prediction))]
    for image_i, image_pred in enumerate(prediction):
        # Filter out confidence scores below threshold
        conf_mask = (image_pred[:, 4] >= conf_thres).squeeze()
        image_pred = image_pred[conf_mask]
        # If none are remaining => process next image
        if not image_pred.size(0):
            continue
        # Get score and class with highest confidence
        class_conf, class_pred = torch.max(image_pred[:, 5 : 5 + num_classes], 1, keepdim=True)
        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
        # Iterate through all predicted classes
        unique_labels = detections[:, -1].cpu().unique()
        if prediction.is_cuda:
            unique_labels = unique_labels.cuda()
        for c in unique_labels:
            # Get the detections with the particular class
            detections_class = detections[detections[:, -1] == c]
            # Sort the detections by maximum objectness confidence
            _, conf_sort_index = torch.sort(detections_class[:, 4], descending=True)
            detections_class = detections_class[conf_sort_index]
            # Perform non-maximum suppression
            max_detections = []
            while detections_class.size(0):
                # Get detection with highest confidence and save as max detection
                max_detections.append(detections_class[0].unsqueeze(0))
                # Stop if we're at the last detection
                if len(detections_class) == 1:
                    break
                # Get the IOUs for all boxes with lower confidence
                ious = bbox_iou(max_detections[-1], detections_class[1:])
                # Remove detections with IoU >= NMS threshold
                detections_class = detections_class[1:][ious < nms_thres]

            max_detections = torch.cat(max_detections).data
            # Add max detections to outputs
            output[image_i] = (
                max_detections if output[image_i] is None else torch.cat((output[image_i], max_detections))
            )

    return output
Author: cf904c27, Project: PyTorch-YOLOv3, Lines: 58, Source: utils.py

Example 12: sample

 def sample(self):
     # print(torch.sort(torch.randn(200, self.latent_variable_size), dim=0))
     sample = Variable(torch.sort(torch.randn(200, self.latent_variable_size), dim=0)[0])
     sample = self.decode(sample)
     # print(sample)
     # print(sample.size())
     # save_image(sample.data.view(64, 1, 15, 20)*255,
     #         'sample_' + str(epoch) + '.png')
     return sample.data.numpy()
Author: zimmerman-cole, Project: esn_experiments, Lines: 9, Source: VAE.py

Example 13: find_neighbors

def find_neighbors(im, image_set, image_names, num_neighbors=num_neighbors):
    # compute the L2 distances
    dists = compute_L2_dists(im, image_set)
    # sort in the order of increasing distance
    sorted, indices = torch.sort(dists, dim=0, descending=False)
    indices = indices.cpu()
    # pick the nearest neighbors
    nn_names = [image_names[i] for i in indices[:num_neighbors]]
    return nn_names, indices[:num_neighbors]
Author: TaihuLight, Project: biogans, Lines: 9, Source: nearest_neighbors.py
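
find_neighbors sorts every distance even though only the nearest few are kept. A minimal sketch (not from the repository above) of the same selection using torch.topk with largest=False, which avoids materializing the full sorted order:

import torch

dists = torch.tensor([0.9, 0.1, 0.5, 0.3])
nearest_vals, nearest_idx = torch.topk(dists, k=2, largest=False)
# nearest_vals -> tensor([0.1000, 0.3000]), nearest_idx -> tensor([1, 3])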

Example 14: from_batch

    def from_batch(self, translation_batch):
        batch = translation_batch["batch"]
        assert(len(translation_batch["gold_score"]) ==
               len(translation_batch["predictions"]))
        batch_size = batch.batch_size

        preds, pred_score, attn, gold_score, indices = list(zip(
            *sorted(zip(translation_batch["predictions"],
                        translation_batch["scores"],
                        translation_batch["attention"],
                        translation_batch["gold_score"],
                        batch.indices.data),
                    key=lambda x: x[-1])))

        # Sorting
        inds, perm = torch.sort(batch.indices.data)
        data_type = self.data.data_type
        if data_type == 'text':
            src = batch.src[0].data.index_select(1, perm)
        else:
            src = None

        if self.has_tgt:
            tgt = batch.tgt.data.index_select(1, perm)
        else:
            tgt = None

        translations = []
        for b in range(batch_size):
            if data_type == 'text':
                src_vocab = self.data.src_vocabs[inds[b]] \
                  if self.data.src_vocabs else None
                src_raw = self.data.examples[inds[b]].src
            else:
                src_vocab = None
                src_raw = None
            pred_sents = [self._build_target_tokens(
                src[:, b] if src is not None else None,
                src_vocab, src_raw,
                preds[b][n], attn[b][n])
                          for n in range(self.n_best)]
            gold_sent = None
            if tgt is not None:
                gold_sent = self._build_target_tokens(
                    src[:, b] if src is not None else None,
                    src_vocab, src_raw,
                    tgt[1:, b] if tgt is not None else None, None)

            translation = Translation(src[:, b] if src is not None else None,
                                      src_raw, pred_sents,
                                      attn[b], pred_score[b], gold_sent,
                                      gold_score[b])
            translations.append(translation)

        return translations
Author: xiamengzhou, Project: OpenNMT-py, Lines: 55, Source: Translation.py

Example 15: isCompact

 def isCompact(tensor):
     # isn't it enough to check if strides == size.cumprod(0)?
     sortedStride, perm = torch.sort(torch.LongTensor(tensor.stride()), 0, True)
     sortedSize = torch.LongTensor(list(tensor.size())).index_select(0, perm)
     nRealDim = int(torch.clamp(sortedStride, 0, 1).sum())
     sortedStride = sortedStride.narrow(0, 0, nRealDim).clone()
     sortedSize = sortedSize.narrow(0, 0, nRealDim).clone()
     t = tensor.new().set_(tensor.storage(), 0,
                           tuple(sortedSize),
                           tuple(sortedStride))
     return t.is_contiguous()
Author: Jsmilemsj, Project: pytorch, Lines: 11, Source: Module.py


Note: The torch.sort examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the license of the corresponding project. Please do not reproduce without permission.