

Python Variable.squeeze Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.squeeze. If you have been wondering what Variable.squeeze does, how to call it, or what real uses of it look like, the hand-picked examples below should help. You can also explore further usage examples of the containing class, torch.autograd.Variable.


Nine code examples of Variable.squeeze are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
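
As a quick orientation before the examples: squeeze() removes every dimension of size 1, while squeeze(dim) removes only the given dimension, and only if it has size 1. A minimal sketch of this behavior (note that since PyTorch 0.4, Variable has been merged into torch.Tensor; most of the examples below use the older, pre-0.4 Variable API):

import torch
from torch.autograd import Variable

v = Variable(torch.zeros(1, 3, 1, 2))
print(v.squeeze().size())    # torch.Size([3, 2]): every size-1 dim removed
print(v.squeeze(0).size())   # torch.Size([3, 1, 2]): only dim 0 removed
print(v.squeeze(1).size())   # torch.Size([1, 3, 1, 2]): dim 1 has size 3, so it is left alone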

Example 1: show_plot

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
def show_plot(epoch):
    plt.clf()
    plt.title(epoch)
    plt.ylim(-20, 40)
    # Draw 100 stochastic forward passes of the model over fresh random inputs.
    for _ in range(100):
        X_test = 10 * np.random.random(N)
        X_test = Variable(torch.FloatTensor(X_test))
        X_test = X_test.unsqueeze(1)    # (N,) -> (N, 1): the column shape the model expects
        Y = bayesmodel(X_test)
        plt.scatter(X_test.squeeze().data.numpy(), Y.squeeze().data.numpy(),
                    color='green', alpha=0.1)  # squeeze back to flat (N,) arrays for plotting
    plt.scatter(input_data, output_data)
    plt.pause(0.01)
Author: adrielb | Project: ProbabilisticProgramming | Lines of code: 15 | Source file: reparameterized_regression-repl.py
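
The pattern to note here is the round trip: unsqueeze(1) turns the flat random inputs into the (N, 1) column the model expects, and squeeze() flattens inputs and outputs back to the (N,) shape that plt.scatter wants. A standalone sketch of just that round trip, with a hypothetical stand-in for bayesmodel:

import torch
from torch.autograd import Variable

x = Variable(torch.rand(50))        # (50,) flat inputs
x_col = x.unsqueeze(1)              # (50, 1): column shape for the model
y = x_col * 3.0                     # hypothetical stand-in for bayesmodel(x_col)
xs = x_col.squeeze().data.numpy()   # back to flat (50,) NumPy arrays
ys = y.squeeze().data.numpy()
assert xs.shape == (50,) and ys.shape == (50,)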

Example 2: forward

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
    def forward(self, x):
        # print(x)                              # <class 'torch.autograd.variable.Variable'> [torch.LongTensor of size 64x35]
        # x_size = x.data.size(1)
        # print(x_size)
        x = self.embed(x)
        x = x.unsqueeze(1)                      # (N, Ci, W, D): add a channel dim at index 1
        # print(x)                             # [torch.FloatTensor of size 64x1x35x128]
        # x = Variable(torch.transpose(x.data, 0, 1))
        # x = self.bn(x)
        # x = Variable(torch.transpose(x.data, 0, 1))
        # print(x)                             # [torch.FloatTensor of size 64x35x128]
        # if self.args.static:
        #     x = Variable(x.data)
        # print(x)                             # [torch.FloatTensor of size 64x35x128]

        # x2 = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        # print(x2)
        a = []
        for conv in self.convs1:
            xx = conv(x)                        # variable [torch.FloatTensor of size 16x200x35x1]
            # print(xx)
            # transpose the Variable directly; rebuilding a Variable from .data
            # (torch.transpose(xx.data, ...)) would detach the convolution from backprop
            xx = xx.transpose(2, 3)             # (N, Co, 1, W)
            xx = xx.transpose(1, 2)             # (N, 1, Co, W)
            xx = self.bn(xx)
            xx = F.relu(xx)
            xx = xx.squeeze(1)
            a.append(xx)
        # print(a)
        x = a
        # print(x)                             # [torch.FloatTensor of size 64x100x31], then x32, x33, x34 for the other kernel sizes
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
        # print(x)                             # [torch.FloatTensor of size 64x100]*4

        x = torch.cat(x, 1)
        # print(x)                             # [torch.FloatTensor of size 64x400]
        '''
        x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
        x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
        x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
        x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
        '''

        x = self.dropout(x)  # (N,len(Ks)*Co)
        # print(x)                            # [torch.FloatTensor of size 64x400]
        logit = self.fc1(x)  # (N,C)
        # print(logit)                        # [torch.FloatTensor of size 64x2]
        return logit
Author: Joyce94 | Project: sentence_classification | Lines of code: 49 | Source file: CNN.py
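
The commented-out one-liner above, [F.relu(conv(x)).squeeze(3) for conv in self.convs1], is the standard TextCNN idiom that the loop replaces in order to slot in batch normalization. A self-contained sketch of the shape flow, under hypothetical sizes (batch 64, 100 filters, kernel height 3, sequence length 35, embedding dim 128):

import torch
import torch.nn.functional as F

x = torch.randn(64, 1, 35, 128)           # (N, Ci, W, D)
conv = torch.nn.Conv2d(1, 100, (3, 128))  # kernel spans the full embedding dim
out = F.relu(conv(x))                     # (N, Co, W-K+1, 1) = (64, 100, 33, 1)
out = out.squeeze(3)                      # (64, 100, 33): drop the size-1 width dim
pooled = F.max_pool1d(out, out.size(2)).squeeze(2)  # (64, 100): max over time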

Example 3: translate

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
    def translate(self, src, max_decode_len=2):
        """
        Translate a single input sequence using greedy decoding.

        Parameters:
        -----------

        src: torch.LongTensor (seq_len x 1)
        """
        pad = self.src_dict.get_pad()
        eos = self.src_dict.get_eos()
        bos = self.src_dict.get_bos()
        gpu = src.is_cuda
        # encode
        emb = self.src_embeddings(src)
        enc_outs, enc_hidden = self.encoder(
            emb, compute_mask=False, mask_symbol=pad)
        # decode
        dec_hidden = self.decoder.init_hidden_for(enc_hidden)
        dec_out, enc_att = None, None
        if self.decoder.att_type == 'Bahdanau':
            enc_att = self.decoder.attn.project_enc_outs(enc_outs)
        scores, hyp, atts = [], [], []
        prev = Variable(src.data.new([bos]), volatile=True).unsqueeze(0)
        if gpu: prev = prev.cuda()
        for _ in range(len(src) * max_decode_len):
            prev_emb = self.trg_embeddings(prev).squeeze(0)
            dec_out, dec_hidden, att_weights = self.decoder(
                prev_emb, dec_hidden, enc_outs, out=dec_out, enc_att=enc_att)
            # (batch x vocab_size)
            outs = self.project(dec_out)
            # (1 x batch) argmax over log-probs (take idx across batch dim)
            best_score, prev = outs.max(1)
            prev = prev.t()
            # concat of step vectors along seq_len dim
            scores.append(best_score.squeeze().data[0])
            atts.append(att_weights.squeeze().data.cpu().numpy().tolist())
            hyp.append(prev.squeeze().data[0])
            # termination criterion: decoding <eos>
            if prev.data.eq(eos).nonzero().nelement() > 0:
                break
        # add singleton dimension for compatibility with other decoding
        return [sum(scores)], [hyp], [atts]
Author: mikekestemont | Project: seqmod | Lines of code: 45 | Source file: encoder_decoder.py
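
Inside the decoding loop, squeeze(0) strips the seq-len dimension of 1 from a single-step embedding, so the decoder receives a (batch x emb) input. A sketch of that one step in isolation (the vocabulary and embedding sizes are made up):

import torch
import torch.nn as nn
from torch.autograd import Variable

emb = nn.Embedding(1000, 16)              # hypothetical vocab/embedding sizes
prev = Variable(torch.LongTensor([[2]]))  # (1 x batch): a single <bos> step
step = emb(prev)                          # (1 x batch x emb) = (1, 1, 16)
step = step.squeeze(0)                    # (batch x emb): one decoder input
assert step.size() == (1, 16)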

Example 4: train

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
    def train(self):

        ax_loss = []

        for epoch in range(1, self.config['epoch'] + 1):

            total_loss = 0
            self.loadBatches()
            for batch in self.batches:
                x = [b[0] for b in batch]
                label = [b[1] for b in batch]

                x = Variable(torch.Tensor(x))
                label = Variable(torch.Tensor(label)).t()
                self.optimizer.zero_grad()
                y = self.model(x)
                loss = torch.sum((y.t() - label) ** 2)
                # loss = self.criterion(y.t(), label)
                total_loss += loss.data.numpy()[0]
                loss.backward()
                self.optimizer.step()

                if epoch % self.config['display_epoch'] == 0:
                    for _y, _label in zip(y.squeeze().data.numpy(), label.squeeze().data.numpy()):
                        print('y={:6.2f} label={:6.2f}'.format(_y, _label))

            ax_loss.append(total_loss / len(self.dataset))
            if epoch % self.config['display_epoch'] == 0:
                print('epoch={}, loss={:.2f}'.format(epoch, total_loss / len(self.dataset)))

        fig, ax = plt.subplots(figsize=(12, 5))
        ax.plot(range(1, len(ax_loss) + 1), ax_loss, 'r-', label='loss')
        ax.set_xlabel('epoch')
        ax.set_ylabel('loss')
        ax.legend(loc='upper right')
        ax.grid()
        plt.show()
Author: zyoohv | Project: zyoohv.github.io | Lines of code: 39 | Source file: main.py

Example 5: valid

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
    def valid(self):
        self.model.train(False)

        y_label = []
        self.loadBatches()
        for batch in self.batches:
            x = [b[0] for b in batch]
            label = [b[1] for b in batch]

            x = Variable(torch.Tensor(x))
            label = Variable(torch.Tensor(label)).t()
            y = self.model(x)

            for _y, _label in zip(y.squeeze().data.numpy(), label.squeeze().data.numpy()):
                y_label.append([_y, _label])

        fig, ax = plt.subplots(figsize=(12, 5))
        ax.plot([x[1] for x in y_label], [x[0] for x in y_label], 'r+')
        ax.set_xlabel('label')
        ax.set_ylabel('predict_result')
        ax.grid()
        plt.show()

        self.model.train(True)
Author: zyoohv | Project: zyoohv.github.io | Lines of code: 26 | Source file: main.py

Example 6: range

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
    times = []
    epochs = 500
    for epoch in range(epochs):
        print("Epoch", epoch)
        epoch_start = time.time()
        model.train()
        train_losses = []
        train_accuracies = []
        train_accuracies2 = []
        _start = time.time()
        for batch_idx, sequence_batch in enumerate(train_loader):
            sequence_batch = Variable(sequence_batch, requires_grad=False)
            if use_cuda:
                sequence_batch = sequence_batch.cuda()
            sequence = sequence_batch.squeeze(dim=0)
            subsequences = torch.split(sequence, split_size=100)
            for seq in subsequences:
                batch_size = 1
                seq_len = seq.size()[0]
                seq = seq.view(seq_len, -1).contiguous()
                seq = seq.unsqueeze(dim=1)
                targets = seq[1:]

                optimizer.zero_grad()
                predictions = model(seq)
                losses = [F.binary_cross_entropy(input=pred, target=targets[step]) for step, pred in enumerate(predictions[:-1])]
                loss = sum(losses)
                loss.backward()
                torch.nn.utils.clip_grad_norm(model.parameters(), .25)
                optimizer.step()
Author: mbeissinger | Project: recurrent_gsn | Lines of code: 32 | Source file: bouncing_balls_untied_gsn.py
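
Here squeeze(dim=0) drops only the leading batch dimension (the loader yields one sequence per batch); passing an explicit dim leaves any other incidental size-1 dimensions intact. A sketch under that assumption, with made-up sequence dimensions:

import torch

batch = torch.randn(1, 500, 64)   # hypothetical: 1 sequence, 500 steps, 64 features
seq = batch.squeeze(dim=0)        # (500, 64): only the batch dim is removed
chunks = torch.split(seq, 100)    # five (100, 64) subsequences, as in the loop above
assert len(chunks) == 5 and chunks[0].size() == (100, 64)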

Example 7: train_and_validate

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
def train_and_validate(model, train_loader, test_loader,
                       num_epochs, device = torch.device("cuda:0"),
                       learning_rate = 0.001,
                       weight_decay = 0,
                       multiGPU = False,
                       save_name = 'trained_model.pt'):
    batch_size = train_loader.batch_size
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=weight_decay)
    # optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate,
    #                                 weight_decay = weight_decay,
    #                                 momentum = 0.6);

    patience = 15 if weight_decay > 0 else 10
    step_size = 25 if weight_decay > 0 else 15

    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'max', factor=0.1, patience=patience, verbose=True)
    # Training
    print("lr:{} wd:{}".format(learning_rate, weight_decay))
    model.train().to(device)
#     if isinstance(model, EnsembleClassifier):
#         if multiGPU == True:
#             print("multiGPU")
#             model.set_devices_multiGPU()

    history = {'batch': [], 'loss': [], 'accuracy': []}
    best_val_accuracy = 0
    for epoch in range(num_epochs):
        # scheduler.step()
        model.train()
        tic=timeit.default_timer()
        losses = []  # losses in this epoch, per batch
        accuracies_train = []  # accuracies in this epoch, per batch
        for i, (images, labels) in enumerate(train_loader):
            images = Variable(images).to(device)
            labels = Variable(labels).squeeze(1).long().to(device)#.cpu()
            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            losses.append(loss.item())
            optimizer.step()
            _, argmax = torch.max(outputs, 1)
            accuracy_train = (labels == argmax.squeeze()).float().mean()*100
            accuracies_train.append(accuracy_train.cpu())
            # Show progress
            if (i+1) % 32 == 0:
                log = " ".join([
                  "Epoch : %d/%d" % (epoch+1, num_epochs),
                  "Iter : %d/%d" % (i+1, len(train_loader.dataset)//batch_size)])
                print('\r{}'.format(log), end=" ")

        epoch_log = " ".join([
          "Epoch : %d/%d" % (epoch+1, num_epochs),
          "Training Loss: %.4f" % np.mean(losses),
          "Training Accuracy: %.4f" % np.mean(accuracies_train)])
        print('\r{}'.format(epoch_log))

        ##VALIDATION SCORE AFTER EVERY EPOCH
        model.eval()
        correct = 0
        total = 0

        total_labels = torch.Tensor().long()
        total_predicted = torch.Tensor().long()

        for images, labels in test_loader:
            augmented_images = augment_and_transform_for_prediction(images)
            augmented_images = Variable(augmented_images).to(device)
            labels = labels.squeeze(1)
            outputs = model(augmented_images)
            
            probabilities = torch.exp(nn.LogSoftmax()(outputs))
            predicted = torch.argmax(torch.mean(probabilities, 0))

#             print(predicted)
#             print(labels)
            total += labels.size(0)
        #     print(total)
            correct += (predicted.cpu().long() == labels).sum()
            total_labels = torch.cat((total_labels,labels))
            total_predicted = torch.cat((total_predicted, predicted.cpu().long().unsqueeze(dim=0)))
            val_accuracy = 100*correct.item() / total
        print('VALIDATION SET ACCURACY: %.4f %%' % val_accuracy)
        scheduler.step(correct.item() / total)

        ###Results for analysis###
        if val_accuracy >= best_val_accuracy:
            best_val_accuracy = val_accuracy
            save_model(epoch, model, optimizer, scheduler, name = save_name)
            pickle.dump(total_predicted.cpu().long(), open("test_predicted.pkl", "wb"))
            pickle.dump(total_labels.long(), open("test_labels.pkl", "wb"))

        toc=timeit.default_timer()
        if epoch+1 == 70:
#.........(remaining code omitted).........
Author: dimtsi | Project: Malaria_PyTorch_DSP | Lines of code: 103 | Source file: CNN_PyTorch.py
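
The labels.squeeze(1) calls above exist because nn.CrossEntropyLoss expects class-index targets of shape (N,), while this dataset apparently yields them as an (N, 1) column. A minimal sketch of that contract, with assumed shapes:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
outputs = torch.randn(8, 4)            # (N, C) logits
labels = torch.randint(0, 4, (8, 1))   # (N, 1) column, as if loaded from disk
loss = criterion(outputs, labels.squeeze(1).long())  # target must be shape (N,)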

Example 8: SkeletonsMaps

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
s_map = SkeletonsMaps()

worm_roi_t = worm_roi_t.cuda()
s_seed = s_seed.cuda()
skel_width = skel_width.cuda()
model = model.cuda()
s_map = s_map.cuda()

worm_roi_t = Variable(worm_roi_t)
s_seed = Variable(s_seed)
skel_width = Variable(skel_width)
#%%
maps_o = s_map(s_seed, skel_width)
mm = maps_o - worm_roi_t
#%%
bot = worm_roi_t.min()
top = worm_roi_t.max()
worm_roi_n = (worm_roi_t.squeeze() - bot) / (top - bot)

p_w = (maps_o * worm_roi_n) + 1.e-5

skel_map_inv = (-maps_o).add_(1)
worm_img_inv = (-worm_roi_n.squeeze()).add_(1)
p_bng = (skel_map_inv * worm_img_inv) + 1.e-5

# p_bng = torch.sqrt(p_bng)

# c_loss = F.binary_cross_entropy(p_w, p_bng)
c_loss = -(p_bng * torch.log(p_w) + p_w * torch.log(p_bng)).mean()
Author: ver228 | Project: Work_In_Progress | Lines of code: 33 | Source file: autoencoder_to_eigen.py

Example 9: print

# Required module: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import squeeze [as alias]
        if i % 300 == 0:
            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
                epoch + 1, num_epoches, running_loss / (batch_size * i),
                running_acc / (batch_size * i)))
    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
            train_dataset))))
    model.eval()
    eval_loss = 0.
    eval_acc = 0.
    for data in test_loader:
        img, label = data
        b, c, h, w = img.size()
        assert c == 1, 'channel must be 1'
        img = img.squeeze(1)
        # img = img.view(b*h, w)
        # img = torch.transpose(img, 1, 0)
        # img = img.contiguous().view(w, b, h)
        if use_gpu:
            img = Variable(img, volatile=True).cuda()
            label = Variable(label, volatile=True).cuda()
        else:
            img = Variable(img, volatile=True)
            label = Variable(label, volatile=True)
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.data[0] * label.size(0)
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()
        eval_acc += num_correct.data[0]
Author: caolusg | Project: pytorch-beginner | Lines of code: 32 | Source file: recurrent_network.py
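
Here img.squeeze(1) converts single-channel images (b, 1, h, w) into (b, h, w) so the recurrent model can read each image as a length-h sequence of w-dimensional rows. A sketch with MNIST-like shapes (assumed):

import torch

img = torch.randn(32, 1, 28, 28)   # (b, c, h, w): a single-channel batch
assert img.size(1) == 1, 'channel must be 1'
seq = img.squeeze(1)               # (32, 28, 28): 28 rows of 28 features per image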


Note: The torch.autograd.Variable.squeeze examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's License. Do not repost without permission.