

Python torch.no_grad Function Code Examples

This article collects typical usage examples of the torch.no_grad function in Python. If you are wondering how torch.no_grad works, how to call it, or what it looks like in real code, the curated examples below should help.


The following sections present 15 code examples of the no_grad function, sorted by popularity by default.
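Before the collected examples, here is a minimal, self-contained sketch of what torch.no_grad does: inside the context manager, autograd stops recording operations, so results have requires_grad=False and no computation graph is built, which is the usual setting for evaluation and inference. The tiny model and tensors below are made-up placeholders for illustration only, not taken from any of the examples.

import torch
import torch.nn as nn

# Toy model and input, used only to illustrate the effect of torch.no_grad.
model = nn.Linear(4, 2)
x = torch.randn(3, 4)

# Ordinary forward pass: autograd records operations for backprop.
y = model(x)
print(y.requires_grad)       # True

# Inside torch.no_grad() nothing is recorded, which saves memory and
# is the standard setting for evaluation / inference code.
with torch.no_grad():
    y_eval = model(x)
print(y_eval.requires_grad)  # False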

Example 1: test

def test(net, dataloader, tag=''):
    correct = 0
    total = 0
    if tag == 'Train':
        dataTestLoader = dataloader.trainloader
    else:
        dataTestLoader = dataloader.testloader
    with torch.no_grad():
        for data in dataTestLoader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    net.log('%s Accuracy of the network: %d %%' % (tag,
        100 * correct / total))

    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for data in dataTestLoader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            for i in range(len(labels)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1


    for i in range(10):
        net.log('%s Accuracy of %5s : %2d %%' % (
            tag, dataloader.classes[i], 100 * class_correct[i] / class_total[i]))
Developer: emoyers, Project: vision-hw5, Lines: 35, Source: main.py

Example 2: forward

    def forward(self, vocab):
        with torch.no_grad():
            batch_shape = vocab['sentence'].shape
            s_embedding = self.embedding(vocab['sentence'].cuda())
            a_embedding = self.embedding(vocab['aspect'].cuda())

            packed_s = pack_padded_sequence(s_embedding, vocab['sent_len'], batch_first=True)

        out_s, (h_s, c1) = self.lstm_s(packed_s) # packed output
        out_a, (h_a, c2) = self.lstm_a(a_embedding)

        with torch.no_grad():
            unpacked_out_s, _ = pad_packed_sequence(out_s, batch_first=True)

        # Pair-wise interaction matrix
        I_matrix = torch.bmm(unpacked_out_s, out_a.permute(0,2,1))

        # Column-wise softmax
        a2s_attn = F.softmax(I_matrix, dim=1)

        # Row-wise softmax => Column-wise average => aspect attention
        s2a_attn = F.softmax(I_matrix, dim=2)
        a_attn = torch.mean(s2a_attn, dim=1)

        # Final sentence attn => weighted sum of each individual a2s_attn
        s_attn = torch.bmm(a2s_attn, a_attn.unsqueeze(-1))

        final_rep = torch.bmm(unpacked_out_s.permute(0,2,1), s_attn).squeeze(-1)
        pred = self.fc(final_rep)
        return pred
Developer: bearcave9, Project: Weekend-Projects, Lines: 30, Source: AOA_LSTM.py

Example 3: predict_proba

    def predict_proba(self, X):
        X = X.to(device=self.cf_a.device)

        if self.cf_a.task_type == "regression":
            with torch.no_grad():
                return self.forward(X)
        elif self.cf_a.task_type == "classification":
            with torch.no_grad():
                return nn.functional.softmax(self.forward(X), dim=1)
Developer: manuwhs, Project: Trapyng, Lines: 9, Source: GeneralVBModel.py

Example 4: predict

    def predict(self, X):
        """ sklearn interface without creating graph """
        X = X.to(device=self.cf_a.device)
        if self.cf_a.task_type == "regression":
            with torch.no_grad():
                return self.forward(X)
        elif self.cf_a.task_type == "classification":
            with torch.no_grad():
                return torch.argmax(self.forward(X), 1)
Developer: manuwhs, Project: Trapyng, Lines: 9, Source: GeneralVBModel.py

Example 5: forward

    def forward(self, encoder_output, hsz, beam_width=1):
        h_i = self.get_state(encoder_output)
        context = encoder_output.output
        if beam_width > 1:
            with torch.no_grad():
                context = repeat_batch(context, beam_width)
                if type(h_i) is tuple:
                    h_i = repeat_batch(h_i[0], beam_width, dim=1), repeat_batch(h_i[1], beam_width, dim=1)
                else:
                    h_i = repeat_batch(h_i, beam_width, dim=1)
        batch_size = context.shape[0]
        h_size = (batch_size, hsz)
        with torch.no_grad():
            init_zeros = context.data.new(*h_size).zero_()
        return h_i, init_zeros, context
Developer: dpressel, Project: baseline, Lines: 15, Source: decoders.py

Example 6: resnet_features

def resnet_features(batch_arrayd):

    with torch.no_grad():

        batch_feature = {}
        ids = list(batch_arrayd.keys())
        video_array = [x for x in batch_arrayd.values()]
        array_sizes = [x.shape[0] for x in batch_arrayd.values()]

        video1_array = np.array(video_array[0], dtype = np.float32)  # change datatype of frames to float32
        video_tensor = torch.from_numpy(video1_array)

        video_frames = video_tensor.size()[0]
        num_steps = math.ceil(video_frames / 100)
        resnet_feature = torch.zeros(video_frames,2048)

        video_tensor = video_tensor.permute(0,3,1,2) # change dimension to [?,3,224,224]

        for i in range(num_steps):
            start = i*100
            end = min((i+1)*100, video_frames)
            tensor_var = Variable(video_tensor[start:end]).to(device)
            feature = resnet50(tensor_var).data
            feature.squeeze_(3)
            feature.squeeze_(2)
            resnet_feature[start:end] = feature

        return {ids[0]:resnet_feature}
Developer: pawandeep2155, Project: Simple_video_caption, Lines: 28, Source: extract_features.py

Example 7: test

def test(model, device, test_loader):
    model.to(device)
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        y_pred = []
        y_true = []
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            output = torch.mean(output.view(output.size(0), output.size(1), -1), dim=2)
            test_loss += F.cross_entropy(output, target).item()  # accumulate the per-batch mean loss
            output = F.softmax(output, dim=1)
            confidence, pred = output.max(1)
            print('confidence: {}, prediction: {}, ground truth: {}'.format(confidence.cpu().numpy(), pred.cpu().numpy(), target.cpu().numpy()))
            y_pred += pred.data.tolist()
            y_true += target.data.tolist()
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader)  # average over batches so the reported value matches its label
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    print(metrics.classification_report(np.asarray(y_true), np.asarray(y_pred)))
    print('confusion matrix: \n', metrics.confusion_matrix(np.asarray(y_true), np.asarray(y_pred)))
    print('\n')
Developer: hubutui, Project: SonoNet-weights, Lines: 26, Source: example-pytorch.py

Example 8: generate_translation

def generate_translation(encoder, decoder, sentence, max_length, target_lang, search="greedy", k = None):
    """ 
    @param max_length: the max # of words that the decoder can return
    @returns decoded_words: a list of words in target language
    """    
    with torch.no_grad():
        input_tensor = sentence
        input_length = sentence.size()[1]
        
        # encode the source sentence
        encoder_hidden = encoder.init_hidden(1)
        # input_tensor 1 by 12 
        # 
        encoder_outputs, encoder_hidden = encoder(input_tensor.view(1, -1),torch.tensor([input_length]))
        # start decoding
        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS
        decoder_hidden = encoder_hidden
        decoded_words = []
        
        if search == 'greedy':
            decoded_words = greedy_search_batch(decoder, decoder_input, encoder_outputs, decoder_hidden, max_length)
        elif search == 'beam':
            if k is None:
                k = 2 # since k = 2 performs badly
            decoded_words = beam_search(decoder, decoder_input, encoder_outputs, decoder_hidden, max_length, k, target_lang) 

        return decoded_words
Developer: vwrj, Project: neural_machine_translation, Lines: 27, Source: V2-Attention-Vish.py

Example 9: sparse_

def sparse_(tensor, sparsity, std=0.01):
    r"""Fills the 2D input `Tensor` as a sparse matrix, where the
    non-zero elements will be drawn from the normal distribution
    :math:`\mathcal{N}(0, 0.01)`, as described in "Deep learning via
    Hessian-free optimization" - Martens, J. (2010).

    Args:
        tensor: an n-dimensional `torch.Tensor`
        sparsity: The fraction of elements in each column to be set to zero
        std: the standard deviation of the normal distribution used to generate
            the non-zero values

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.sparse_(w, sparsity=0.1)
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    rows, cols = tensor.shape
    num_zeros = int(math.ceil(sparsity * rows))

    with torch.no_grad():
        tensor.normal_(0, std)
        for col_idx in range(cols):
            row_indices = torch.randperm(rows)
            zero_indices = row_indices[:num_zeros]
            tensor[zero_indices, col_idx] = 0
    return tensor
Developer: xiongyw, Project: pytorch, Lines: 29, Source: init.py

Example 10: xavier_uniform_

def xavier_uniform_(tensor, gain=1):
    r"""Fills the input `Tensor` with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-a, a)` where

    .. math::
        a = \text{gain} \times \sqrt{\frac{6}{\text{fan_in} + \text{fan_out}}}

    Also known as Glorot initialization.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        gain: an optional scaling factor

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    with torch.no_grad():
        return tensor.uniform_(-a, a)
Developer: xiongyw, Project: pytorch, Lines: 25, Source: init.py

Example 11: fit_norm_distribution_param

def fit_norm_distribution_param(args, model, train_dataset, channel_idx=0):
    predictions = []
    organized = []
    errors = []
    with torch.no_grad():
        # Turn on evaluation mode which disables dropout.
        model.eval()
        pasthidden = model.init_hidden(1)
        for t in range(len(train_dataset)):
            out, hidden = model.forward(train_dataset[t].unsqueeze(0), pasthidden)
            predictions.append([])
            organized.append([])
            errors.append([])
            predictions[t].append(out.data.cpu()[0][0][channel_idx])
            pasthidden = model.repackage_hidden(hidden)
            for prediction_step in range(1,args.prediction_window_size):
                out, hidden = model.forward(out, hidden)
                predictions[t].append(out.data.cpu()[0][0][channel_idx])

            if t >= args.prediction_window_size:
                for step in range(args.prediction_window_size):
                    organized[t].append(predictions[step+t-args.prediction_window_size][args.prediction_window_size-1-step])
                organized[t]= torch.FloatTensor(organized[t]).to(args.device)
                errors[t] = organized[t] - train_dataset[t][0][channel_idx]
                errors[t] = errors[t].unsqueeze(0)

    errors_tensor = torch.cat(errors[args.prediction_window_size:],dim=0)
    mean = errors_tensor.mean(dim=0)
    cov = errors_tensor.t().mm(errors_tensor)/errors_tensor.size(0) - mean.unsqueeze(1).mm(mean.unsqueeze(0))
    # cov: positive-semidefinite and symmetric.

    return mean, cov
Developer: chen0031, Project: RNN-Time-series-Anomaly-Detection, Lines: 32, Source: anomalyDetector.py

Example 12: tts

def tts(model, text, p=0, speaker_id=None, fast=True):
    """Convert text to speech waveform given a deepvoice3 model.

    Args:
        text (str) : Input text to be synthesized
        p (float) : Replace words with their pronunciation if p > 0. Default is 0.
    """
    model = model.to(device)
    model.eval()
    if fast:
        model.make_generation_fast_()

    sequence = np.array(_frontend.text_to_sequence(text, p=p))
    sequence = torch.from_numpy(sequence).unsqueeze(0).long().to(device)
    text_positions = torch.arange(1, sequence.size(-1) + 1).unsqueeze(0).long().to(device)
    speaker_ids = None if speaker_id is None else torch.LongTensor([speaker_id]).to(device)

    # Greedy decoding
    with torch.no_grad():
        mel_outputs, linear_outputs, alignments, done = model(
            sequence, text_positions=text_positions, speaker_ids=speaker_ids)

    linear_output = linear_outputs[0].cpu().data.numpy()
    spectrogram = audio._denormalize(linear_output)
    alignment = alignments[0].cpu().data.numpy()
    mel = mel_outputs[0].cpu().data.numpy()
    mel = audio._denormalize(mel)

    # Predicted audio signal
    waveform = audio.inv_spectrogram(linear_output.T)

    return waveform, alignment, spectrogram, mel
Developer: Saiuz, Project: autokeras, Lines: 32, Source: synthesis.py

Example 13: predict

def predict(image_path, model, topk, architecture):   
    
    img = load_image(image_path)

    model.eval()   
    # set architecture (cuda or cpu)
    model.to(architecture)
    img = img.to(architecture)

    with torch.no_grad():
        output = model.forward(img)
        
    # get probabilities
    probability = torch.exp(output.data)

    # get top k probabilities and labels
    top_probs, top_labs = probability.topk(topk)

    # convert to numpy lists
    top_probs = top_probs.cpu().numpy()[0].tolist()
    top_labs = top_labs.cpu().numpy()[0].tolist()

    # reverse class_to_idx
    idx_to_class = {val: key for key, val in model.class_to_idx.items() }

    # map to classes from file and to string labels
    top_labels = [idx_to_class[label] for label in top_labs]
    top_flowers = [cat_to_name[idx_to_class[label]] for label in top_labs]

    return top_probs, top_labels, top_flowers
Developer: werjo, Project: image_classifier, Lines: 30, Source: predict.py

Example 14: stylize

def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(os.path.join(args.model_dir, args.style+".pth"))
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)

        filenames = os.listdir(args.content_dir)

        for filename in filenames:
            print("Processing {}".format(filename))
            full_path = os.path.join(args.content_dir, filename)
            content_image = load_image(full_path, scale=args.content_scale)
            content_transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Lambda(lambda x: x.mul(255))
            ])
            content_image = content_transform(content_image)
            content_image = content_image.unsqueeze(0).to(device)

            output = style_model(content_image).cpu()

            output_path = os.path.join(args.output_dir, filename)
            save_image(output_path, output[0])
Developer: rajivramanjani, Project: MachineLearningNotebooks, Lines: 29, Source: neural_style.py

Example 15: main

def main():
    args = get_arguments()

    os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu

    model = XLSor(num_classes=args.num_classes)
    
    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda()

    testloader = data.DataLoader(XRAYDataTestSet(args.data_dir, args.data_list, crop_size=(512, 512), mean=IMG_MEAN, scale=False, mirror=False), batch_size=1, shuffle=False, pin_memory=True)

    interp = nn.Upsample(size=(512, 512), mode='bilinear', align_corners=True)

    if not os.path.exists('outputs'):
        os.makedirs('outputs')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % (index))
        image, size, name = batch
        with torch.no_grad():
            prediction = model(image.cuda(), args.recurrence)
            if isinstance(prediction, list):
                prediction = prediction[0]
            prediction = interp(prediction).cpu().data[0].numpy().transpose(1, 2, 0)
        output_im = PILImage.fromarray((np.clip(prediction[:,:,0],0,1)* 255).astype(np.uint8))
        output_im.save('./outputs/' + os.path.basename(name[0]).replace('.png', '_xlsor.png'), 'png')
Developer: rsummers11, Project: CADLab, Lines: 31, Source: test.py


Note: The torch.no_grad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.