

Python torch.squeeze Method Code Examples

This article collects typical usage examples of the torch.squeeze method in Python. If you are wondering what torch.squeeze does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the torch module.


The sections below present 15 code examples of the torch.squeeze method, sorted by popularity by default.
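Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) showing what torch.squeeze does: it removes dimensions of size 1 from a tensor, either all of them or only the dimension given by dim. The tensor shapes used here are illustrative assumptions.

import torch

x = torch.zeros(2, 1, 3, 1)            # example tensor of shape [2, 1, 3, 1]
print(torch.squeeze(x).shape)          # all size-1 dims removed   -> torch.Size([2, 3])
print(torch.squeeze(x, dim=1).shape)   # only dim 1 removed        -> torch.Size([2, 3, 1])
print(torch.squeeze(x, dim=0).shape)   # dim 0 is not size 1, kept -> torch.Size([2, 1, 3, 1])

Note that calling torch.squeeze without dim also removes a batch dimension of size 1, which is why several of the examples below pass an explicit dim.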

Example 1: r_duvenaud

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def r_duvenaud(self, h):
        # layers
        aux = []
        for l in range(len(h)):
            param_sz = self.learn_args[l].size()
            parameter_mat = torch.t(self.learn_args[l])[None, ...].expand(h[l].size(0), param_sz[1],
                                                                                      param_sz[0])

            aux.append(torch.transpose(torch.bmm(parameter_mat, torch.transpose(h[l], 1, 2)), 1, 2))

            for j in range(0, aux[l].size(1)):
                # Mask whole 0 vectors
                aux[l][:, j, :] = nn.Softmax()(aux[l][:, j, :].clone())*(torch.sum(aux[l][:, j, :] != 0, 1) > 0).expand_as(aux[l][:, j, :]).type_as(aux[l])

        aux = torch.sum(torch.sum(torch.stack(aux, 3), 3), 1)
        return self.learn_modules[0](torch.squeeze(aux)) 
Developer: priba, Project: nmp_qc, Lines of code: 18, Source file: ReadoutFunction.py

Example 2: m_ggnn

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def m_ggnn(self, h_v, h_w, e_vw, opt={}):

        m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))

        for w in range(h_w.size(1)):
            if torch.nonzero(e_vw[:, w, :].data).size():
                for i, el in enumerate(self.args['e_label']):
                    ind = (el == e_vw[:,w,:]).type_as(self.learn_args[0][i])

                    parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0), self.learn_args[0][i].size(0),
                                                                            self.learn_args[0][i].size(1))

                    m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                                        torch.transpose(torch.unsqueeze(h_w[:, w, :], 1),
                                                                                        1, 2)), 1, 2)
                    m_w = torch.squeeze(m_w)
                    m[:,w,:] = ind.expand_as(m_w)*m_w
        return m 
Developer: priba, Project: nmp_qc, Lines of code: 20, Source file: MessageFunction.py

Example 3: _char_forward

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def _char_forward(self, inputs):
        """
        Args:
            inputs: 3D tensor, [bs, max_len, max_len_char]

        Returns:
            char_conv_outputs: 3D tensor, [bs, max_len, output_dim]
        """
        max_len, max_len_char = inputs.size(1), inputs.size(2)
        inputs = inputs.view(-1, max_len * max_len_char)  # [bs, -1]
        input_embed = self.char_embedding(inputs)  # [bs, ml*ml_c, feature_dim]
        # input_embed = self.dropout_embed(input_embed)
        # [bs, 1, max_len, max_len_char, feature_dim]
        input_embed = input_embed.view(-1, 1, max_len, max_len_char, self.char_dim)
        # conv
        char_conv_outputs = []
        for char_encoder in self.char_encoders:
            conv_output = char_encoder(input_embed)
            pool_output = torch.squeeze(torch.max(conv_output, -2)[0], -1)
            char_conv_outputs.append(pool_output)
        char_conv_outputs = torch.cat(char_conv_outputs, dim=1)
        char_conv_outputs = char_conv_outputs.permute(0, 2, 1)

        return char_conv_outputs 
Developer: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines of code: 26, Source file: BiLSTM_CNN.py

Example 4: MeanPixelAccuracy

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def MeanPixelAccuracy(pred, label):
    """
    Function to compute the mean pixel accuracy for semantic segmentation between mini-batch tensors
    :param pred: Tensor of predictions
    :param label: Tensor of ground-truth
    :return: Mean pixel accuracy for all the mini-bath
    """
    # Convert tensors to numpy arrays
    imPred = np.asarray(torch.squeeze(pred))
    imLab = np.asarray(torch.squeeze(label))

    # Create empty numpy arrays
    pixel_accuracy = np.empty(imLab.shape[0])
    pixel_correct = np.empty(imLab.shape[0])
    pixel_labeled = np.empty(imLab.shape[0])

    # Compute pixel accuracy for each pair of images in the batch
    for i in range(imLab.shape[0]):
        pixel_accuracy[i], pixel_correct[i], pixel_labeled[i] = pixelAccuracy(imPred[i], imLab[i])

    # Compute the final accuracy for the batch
    acc = 100.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))

    return acc 
Developer: vpulab, Project: Semantic-Aware-Scene-Recognition, Lines of code: 26, Source file: utils.py

Example 5: semanticIoU

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def semanticIoU(pred, label):
    """
    Computes the mean Intersection over Union for all the classes between two mini-batch tensors of semantic
    segmentation
    :param pred: Tensor of predictions
    :param label: Tensor of ground-truth
    :return: Mean semantic intersection over Union for all the classes
    """
    imPred = np.asarray(torch.squeeze(pred))
    imLab = np.asarray(torch.squeeze(label))

    area_intersection = []
    area_union = []

    for i in range(imLab.shape[0]):
        intersection, union = intersectionAndUnion(imPred[i], imLab[i])
        area_intersection.append(intersection)
        area_union.append(union)

    IoU = 1.0 * np.sum(area_intersection, axis=0) / np.sum(np.spacing(1)+area_union, axis=0)

    return np.mean(IoU) 
Developer: vpulab, Project: Semantic-Aware-Scene-Recognition, Lines of code: 24, Source file: utils.py

Example 6: dis_loss

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def dis_loss(self, real_samps, fake_samps):
        # small assertion:
        assert real_samps.device == fake_samps.device, \
            "Real and Fake samples are not on the same device"

        # device for computations:
        device = fake_samps.device

        # predictions for real images and fake images separately :
        r_preds = self.dis(real_samps)
        f_preds = self.dis(fake_samps)

        # calculate the real loss:
        real_loss = self.criterion(
            th.squeeze(r_preds),
            th.ones(real_samps.shape[0]).to(device))

        # calculate the fake loss:
        fake_loss = self.criterion(
            th.squeeze(f_preds),
            th.zeros(fake_samps.shape[0]).to(device))

        # return final losses
        return (real_loss + fake_loss) / 2 
Developer: akanimax, Project: BMSG-GAN, Lines of code: 26, Source file: Losses.py

Example 7: convolutional_layer

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def convolutional_layer(self, inputs):
        convolution_all = []
        conv_wts = []
        for i in range(self.seq_len):
            convolution_one_month = []
            for j in range(self.pad_size):
                convolution = self.conv(torch.unsqueeze(inputs[:, i, j], dim=1))
                convolution_one_month.append(convolution)
            convolution_one_month = torch.stack(convolution_one_month)
            convolution_one_month = torch.squeeze(convolution_one_month, dim=3)
            convolution_one_month = torch.transpose(convolution_one_month, 0, 1)
            convolution_one_month = torch.transpose(convolution_one_month, 1, 2)
            convolution_one_month = torch.squeeze(convolution_one_month, dim=1)
            convolution_one_month = self.func_tanh(convolution_one_month)
            convolution_one_month = torch.unsqueeze(convolution_one_month, dim=1)
            vec = torch.bmm(convolution_one_month, inputs[:, i])
            convolution_all.append(vec)
            conv_wts.append(convolution_one_month)
        convolution_all = torch.stack(convolution_all, dim=1)
        convolution_all = torch.squeeze(convolution_all, dim=2)
        conv_wts = torch.squeeze(torch.stack(conv_wts, dim=1), dim=2)
        return convolution_all, conv_wts 
Developer: BarnesLab, Project: Patient2Vec, Lines of code: 24, Source file: Patient2Vec.py

Example 8: _pad_image

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def _pad_image(img, crop_size):
    b, c, h, w = img.shape
    assert(c == 3)
    padh = crop_size[0] - h if h < crop_size[0] else 0
    padw = crop_size[1] - w if w < crop_size[1] else 0
    if padh == 0 and padw == 0:
        return img
    img_pad = F.pad(img, (0, padh, 0, padw))

    # TODO clean this code
    # mean = cfg.DATASET.MEAN
    # std = cfg.DATASET.STD
    # pad_values = -np.array(mean) / np.array(std)
    # img_pad = torch.zeros((b, c, h + padh, w + padw)).to(img.device)
    # for i in range(c):
    #     # print(img[:, i, :, :].unsqueeze(1).shape)
    #     img_pad[:, i, :, :] = torch.squeeze(
    #         F.pad(img[:, i, :, :].unsqueeze(1), (0, padh, 0, padw),
    #               'constant', value=pad_values[i]), 1)
    # assert(img_pad.shape[2] >= crop_size[0] and img_pad.shape[3] >= crop_size[1])

    return img_pad 
Developer: LikeLy-Journey, Project: SegmenTron, Lines of code: 24, Source file: segbase.py

Example 9: _graph_fn_state_value_function_loss_per_item

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def _graph_fn_state_value_function_loss_per_item(self, state_values, advantages, time_percentage=None):
        """
        Computes the loss for V(s).

        Args:
            state_values (SingleDataOp): Baseline predictions V(s).
            advantages (SingleDataOp): Advantage values.

        Returns:
            SingleDataOp: Baseline loss per item.
        """
        v_targets = None
        if get_backend() == "tf":
            state_values = tf.squeeze(input=state_values, axis=-1)
            v_targets = advantages + state_values
            v_targets = tf.stop_gradient(input=v_targets)
        elif get_backend() == "pytorch":
            state_values = torch.squeeze(state_values, dim=-1)
            v_targets = advantages + state_values
            v_targets = v_targets.detach()

        vf_loss = (v_targets - state_values) ** 2
        return self.weight_vf.get(time_percentage) * vf_loss 
Developer: rlgraph, Project: rlgraph, Lines of code: 25, Source file: actor_critic_loss_function.py

Example 10: forward

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def forward(self, h, r, t):
        h_emb, r_emb, t_emb = self.embed(h, r, t)
        first_dimen = list(h_emb.shape)[0]
        
        stacked_h = torch.unsqueeze(h_emb, dim=1)
        stacked_r = torch.unsqueeze(r_emb, dim=1)
        stacked_t = torch.unsqueeze(t_emb, dim=1)

        stacked_hrt = torch.cat([stacked_h, stacked_r, stacked_t], dim=1)
        stacked_hrt = torch.unsqueeze(stacked_hrt, dim=1)  # [b, 1, 3, k]

        stacked_hrt = [conv_layer(stacked_hrt) for conv_layer in self.conv_list]
        stacked_hrt = torch.cat(stacked_hrt, dim=3)
        stacked_hrt = stacked_hrt.view(first_dimen, -1)
        preds = self.fc1(stacked_hrt)
        preds = torch.squeeze(preds, dim=-1)
        return preds 
Developer: Sujit-O, Project: pykg2vec, Lines of code: 19, Source file: pointwise.py

Example 11: validate

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Developer: priba, Project: nmp_qc, Lines of code: 35, Source file: demo_grec_intnet.py

Example 12: validate

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))

    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Developer: priba, Project: nmp_qc, Lines of code: 36, Source file: demo_letter_duvenaud.py

Example 13: validate

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(val_loader):

        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

        # Compute output
        output = model(g, h, e)

        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])

        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))

    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
          
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)

    return accuracies.avg 
Developer: priba, Project: nmp_qc, Lines of code: 35, Source file: demo_gwhist_duvenaud.py

Example 14: u_intnet

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def u_intnet(self, h_v, m_v, opt):
        if opt['x_v'].ndimension():
            input_tensor = torch.cat([h_v, opt['x_v'], torch.squeeze(m_v)], 1)
        else:
            input_tensor = torch.cat([h_v, torch.squeeze(m_v)], 1)

        return self.learn_modules[0](input_tensor) 
Developer: priba, Project: nmp_qc, Lines of code: 9, Source file: UpdateFunction.py

Example 15: u_mpnn

# Required module: import torch [as alias]
# Or: from torch import squeeze [as alias]
def u_mpnn(self, h_v, m_v, opt={}):
        h_in = h_v.view(-1,h_v.size(2))
        m_in = m_v.view(-1,m_v.size(2))
        h_new = self.learn_modules[0](m_in[None,...],h_in[None,...])[0] # 0 or 1???
        return torch.squeeze(h_new).view(h_v.size()) 
Developer: priba, Project: nmp_qc, Lines of code: 7, Source file: UpdateFunction.py


Note: The torch.squeeze examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright of the source code; consult each project's license before redistributing or reusing it. Do not republish this article without permission.