

Python torch.Variable Method Code Examples

This article collects typical usage examples of the torch.Variable method in Python. If you are wondering what torch.Variable does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from torch, the module this method belongs to.


A total of 15 code examples of the torch.Variable method are shown below, sorted by popularity by default.

Example 1: get_dropout_mask

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.
        Tensor whose size and CUDA type determine the size and type of the returned mask.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1/(1 - dropout_probability).
    This scaling ensures that the expected value and variance of the output of applying
    this mask to a tensor match those of the original tensor.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask 
Developer: rowanz, Project: neural-motifs, Lines: 27, Source: decoder_rnn.py
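A minimal usage sketch (hypothetical tensor sizes, not from the original repository): the mask matches the reference tensor's size and device, and the caller multiplies it into the tensor explicitly.

import torch
from torch.autograd import Variable

# Hypothetical batch of hidden states; get_dropout_mask is the function above.
hidden_state = Variable(torch.randn(4, 256))
mask = get_dropout_mask(0.3, hidden_state)   # same size as hidden_state, entries 0 or 1/0.7
dropped = hidden_state * mask                # the caller applies the mask explicitly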

Example 2: train

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def train(net, criterion, optimizer, train_iter):
    # NOTE: `crnn`, `image`, `text`, `length`, `converter` and `utils` are
    # module-level objects defined elsewhere in the original training script.
    for p in crnn.parameters():
        p.requires_grad = True
    crnn.train()

    data = train_iter.next()
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    
    optimizer.zero_grad()
    preds = crnn(image)
    preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    # crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost 
Developer: Holmeyoung, Project: crnn-pytorch, Lines: 23, Source: train.py
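The cost computation relies on the CTC criterion's argument order (predictions, targets, prediction lengths, target lengths). A self-contained sketch with hypothetical shapes, using torch.nn.CTCLoss as a stand-in for the criterion passed in above, illustrates that contract:

import torch
import torch.nn as nn

# Hypothetical sizes: T time steps, batch of N images, C character classes (index 0 = CTC blank).
T, N, C = 26, 4, 37
criterion = nn.CTCLoss(blank=0, reduction='sum')

log_probs = torch.randn(T, N, C).log_softmax(2)            # plays the role of `preds`
targets = torch.randint(1, C, (N, 10), dtype=torch.long)   # encoded labels, like `text`
input_lengths = torch.full((N,), T, dtype=torch.long)      # like `preds_size`
target_lengths = torch.full((N,), 10, dtype=torch.long)    # like `length`

cost = criterion(log_probs, targets, input_lengths, target_lengths) / N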

Example 3: get_dropout_mask

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def get_dropout_mask(dropout_probability: float, h_dim: int):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.


    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
     and the original tensor are the same.
    """
    binary_mask = Variable(torch.FloatTensor(h_dim).cuda().fill_(0.0))
    binary_mask.data.copy_(torch.rand(h_dim) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask 
Developer: KaihuaTang, Project: VCTree-Scene-Graph-Generation, Lines: 27, Source: decoder_tree_lstm.py
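Unlike Example 1, this variant builds a fixed-size CUDA mask without a reference tensor, so it can be generated once and reused at every step of a recurrent computation (as the tree LSTM in Example 4 does). A hypothetical sketch of that reuse pattern, assuming a CUDA device is available:

import torch
from torch.autograd import Variable

h_dim, steps = 128, 5
mask = get_dropout_mask(0.2, h_dim)                            # one GPU mask of shape [h_dim]
h = Variable(torch.zeros(h_dim).cuda())
for _ in range(steps):
    h = torch.tanh(h + Variable(torch.randn(h_dim).cuda()))    # stand-in for the real cell update
    h = h * mask                                               # identical mask at every step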

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def forward(self, forest, features, num_obj):
        # calc dropout mask, same for all
        if self.dropout > 0.0:
            dropout_mask = get_dropout_mask(self.dropout, self.out_dim)
        else:
            dropout_mask = None

        # tree lstm input
        out_h = None
        h_order = Variable(torch.LongTensor(num_obj).zero_().cuda()) # used to restore the original order
        order_idx = 0
        lstm_io = tree_utils.TreeLSTM_IO(out_h, h_order, order_idx, None, None, dropout_mask)
        # run tree lstm forward (leaves to root)
        for idx in range(len(forest)):
            self.treeLSTM(forest[idx], features, lstm_io)
        
        # restore the order to match the input
        output = torch.index_select(lstm_io.hidden, 0, lstm_io.order.long())
        return output 
Developer: KaihuaTang, Project: VCTree-Scene-Graph-Generation, Lines: 21, Source: tree_lstm.py

Example 5: calc_gradient_penalty

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def calc_gradient_penalty(netD, real_data, fake_data, device='cpu', pac=10, lambda_=10):
    alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device)
    alpha = alpha.repeat(1, pac, real_data.size(1))
    alpha = alpha.view(-1, real_data.size(1))

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    # interpolates = torch.Variable(interpolates, requires_grad=True, device=device)

    disc_interpolates = netD(interpolates)

    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size(), device=device),
        create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = (
        (gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1) ** 2).mean() * lambda_
    return gradient_penalty 
Developer: sdv-dev, Project: SDGym, Lines: 21, Source: ctgan.py
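A runnable sketch with hypothetical dimensions and a stand-in linear critic (the real CTGAN discriminator packs `pac` samples per critic input; with pac=1 the packing reshapes become no-ops). The fake batch carries gradients, as it would when produced by the generator during training:

import torch
import torch.nn as nn

dim, batch = 5, 8
netD = nn.Sequential(nn.Linear(dim, 16), nn.ReLU(), nn.Linear(16, 1))  # stand-in critic
real = torch.randn(batch, dim)
fake = torch.randn(batch, dim, requires_grad=True)                     # e.g. generator output

gp = calc_gradient_penalty(netD, real, fake, device='cpu', pac=1, lambda_=10)
gp.backward()   # contributes to the critic loss like any other term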

Example 6: get_activation_wts

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def get_activation_wts(attention_model,x):
    """
        Get r attention heads
 
        Args:
            attention_model : {object} model
            x               : {torch.Variable} input whose weights we want
       
        Returns:
            r different attention weights
 
      
    """
    attention_model.batch_size = x.size(0)
    attention_model.hidden_state = attention_model.init_hidden()
    _,wts = attention_model(x)
    return wts 
Developer: kaushalshetty, Project: Structured-Self-Attention, Lines: 19, Source: train.py

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def forward(self, x):
        """
        Takes a batch of signals and convolves each signal with all elements in the filter
        bank. After convolving the entire filter bank, the method returns a tensor of
        shape [N,N_scales,1/2,T] where the 1/2 number of channels depends on whether
        the filter bank is composed of real or complex filters. If the filters are
        complex the 2 channels represent [real, imag] parts.

        :param x: torch.Variable, batch of input signals of shape [N,1,T]
        :return: torch.Variable, batch of outputs of size [N,N_scales,1/2,T]
        """

        if not self._filters:
            raise ValueError('PyTorch filters not initialized. Please call set_filters() first.')
        results = [None]*len(self._filters)
        for ind, conv in enumerate(self._filters):
            results[ind] = conv(x)
        results = torch.stack(results)     # [n_scales,n_batch,2,t]
        results = results.permute(1,0,2,3) # [n_batch,n_scales,2,t]
        return results 
Developer: tomrunia, Project: PyTorchWavelets, Lines: 23, Source: network.py

Example 8: add

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def add(self, v):
        if isinstance(v, Variable):
            count = v.data.numel()
            v = v.data.sum()
        elif isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()

        self.n_count += count
        self.sum += v 
Developer: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 12, Source: utils.py
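In the original repository this `add` belongs to a small running-average helper used to track the training loss; the rest of the class is not shown here. A hypothetical minimal reconstruction shows how it is typically used:

import torch
from torch.autograd import Variable

class Averager:
    """Hypothetical reconstruction of the accumulator around `add`."""
    def __init__(self):
        self.n_count = 0
        self.sum = 0

    def add(self, v):   # same body as the example above
        if isinstance(v, Variable):
            count = v.data.numel()
            v = v.data.sum()
        elif isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()
        self.n_count += count
        self.sum += v

    def val(self):
        return self.sum / float(self.n_count) if self.n_count else 0

loss_avg = Averager()
loss_avg.add(torch.tensor([0.5, 1.5, 2.0]))
print(loss_avg.val())   # running mean over all elements added so far -> 1.3333...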

Example 9: asImg

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def asImg(tensor, size = None):
    """
        This function provides a fast approach to convert an image tensor into a numpy.ndarray.
        It only accepts the output of a sigmoid layer or a hyperbolic tangent layer.

        Arg:    tensor  - The torch.Variable object, the rank format is BCHW or BHW
                size    - The tuple object, and the format is (height, width)
        Ret:    The numpy image, the rank format is BHWC
    """
    global channel_op
    result = tensor.detach()

    # 1. Check the tensor rank first
    if len(tensor.size()) == 3:
        result = torch.stack([result, result, result], 1)

    # 2. Check the value range of the tensor (sigmoid output or hyperbolic tangent output)
    min_v = torch.min(result).cpu().data.numpy()
    max_v = torch.max(result).cpu().data.numpy()
    if max_v > 1.0 or min_v < -1.0:
        raise Exception('tensor value out of range...\t range is [' + str(min_v) + ' ~ ' + str(max_v))
    if min_v < 0:
        result = (result + 1) / 2

    # 3. Define the BCHW -> BHWC operation
    if channel_op is None:
        channel_op = Transpose(BCHW2BHWC)

    # 4. Transpose to BHWC, convert to numpy and rescale to [0, 255]
    result = channel_op(result)
    result = result.cpu().data.numpy()
    if size is not None:
        result_list = []
        for img in result:
            result_list.append(transform.resize(img, (size[0], size[1]), mode = 'constant', order = 0) * 255)
        result = np.stack(result_list, axis = 0)
    else:
        result *= 255.
    result = result.astype(np.uint8)
    return result 
Developer: tomguluson92, Project: StyleGAN2_PyTorch, Lines: 42, Source: function.py

Example 10: predictor_loss_function

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def predictor_loss_function(self, prediction, target, *args, **kwargs):
        """Pure abstract method that computes the loss.

        Args:
            prediction: Prediction that was made by the model of shape
                        [BATCH_SIZE, N_LABELS]
            target: Expected result of shape [BATCH_SIZE, N_OUTPUT_TOKENS]
        Returns:
            loss: This method should return the loss as a Tensor or Variable.
        """
        return torch.Tensor(float("Inf")) 
Developer: pytorch, Project: translate, Lines: 13, Source: word_prediction_criterion.py

Example 11: analyse

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def analyse(net, inputs):
    """
    analyse the network given input
    :param net: torch.nn.Module
    :param inputs: torch.Variable, torch.Tensor or list of them
    :return: blob_dict, tracked_layers
    """
    del tracked_layers[:]
    del blob_dict[:]
    if not isinstance(inputs,(list,tuple)):
        raw_inputs=[inputs]
    else:
        raw_inputs=inputs
    _inputs=[]
    for name,layer in net.named_modules():
        layer_name_dict[layer]=name
    for i in raw_inputs:
        if isinstance(i,Variable):
            _inputs.append(i)
        elif isinstance(i,torch.Tensor):
            _inputs.append(Variable(i))
        elif isinstance(i,np.ndarray):
            _inputs.append(Variable(torch.Tensor(i)))
        else:
            raise NotImplementedError("Not Support the input type {}".format(type(i)))
    net.apply(register)
    net.forward(*_inputs)
    return blob_dict,tracked_layers 
Developer: xxradon, Project: PytorchToCaffe, Lines: 28, Source: PytorchA.py

Example 12: tovar

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def tovar(x, cuda):
    if cuda:
        return Variable(torch.FloatTensor(x).cuda())
    else:
        return Variable(torch.FloatTensor(x.astype(np.float64))) 
Developer: Orkis-Research, Project: Pytorch-Quaternion-Neural-Networks, Lines: 7, Source: copy_task.py
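A short usage sketch with a hypothetical numpy batch; on PyTorch 0.4 and later the returned Variable is simply a Tensor:

import numpy as np
import torch

x = np.random.randn(4, 8)                     # hypothetical batch of 4 feature vectors
v = tovar(x, cuda=torch.cuda.is_available())  # FloatTensor Variable, on the GPU if available
print(v.size())                               # torch.Size([4, 8])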

Example 13: analyse

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def analyse(net, inputs):
    """
    analyse the network given input
    :param net: torch.nn.Module
    :param inputs: torch.Variable, torch.Tensor or list of them
    :return: blob_dict, tracked_layers
    """
    del tracked_layers[:]
    del blob_dict[:]
    if not isinstance(inputs,(list,tuple)):
        raw_inputs=[inputs]
    else:
        raw_inputs=inputs
    _inputs=[]
    for name,layer in net.named_modules():
        layer_name_dict[layer]=name
    for i in raw_inputs:
        if isinstance(i,Variable):
            _inputs.append(i)
        elif isinstance(i,torch.Tensor):
            _inputs.append(Variable(i))
        elif isinstance(i,np.ndarray):
            _inputs.append(Variable(torch.Tensor(i)))
        else:
            raise NotImplementedError("Not Support the input type {}".format(type(i)))
    net.apply(register)
    net.forward(*_inputs)
    for _,m in net.named_modules():
        m._forward_hooks.clear()
    print_by_layers(tracked_layers)
    return blob_dict,tracked_layers 
Developer: hahnyuan, Project: nn_tools, Lines: 33, Source: PytorchA.py

Example 14: __define_variable

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def __define_variable(self, tensor, volatile=False):
        if volatile:
            with torch.no_grad():
                return Variable(tensor)

        return Variable(tensor) 
Developer: Wizaron, Project: instance-segmentation-pytorch, Lines: 8, Source: model.py

Example 15: evaluate

# Required import: import torch [as alias]
# Or: from torch import Variable [as alias]
def evaluate(attention_model,x_test,y_test):
    """
        cv results
 
        Args:
            attention_model : {object} model
            x_test          : {nplist} x_test
            y_test          : {nplist} y_test
       
        Returns:
            cv-accuracy
 
      
    """
   
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()
    x_test_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_test_pred,_ = attention_model(x_test_var)
    if bool(attention_model.type):
        y_preds = torch.max(y_test_pred,1)[1]
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
       
    else:
        y_preds = torch.round(y_test_pred.type(torch.DoubleTensor).squeeze(1))
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))
       
    return torch.eq(y_preds,y_test_var).data.sum()/x_test_var.size(0) 
Developer: kaushalshetty, Project: Structured-Self-Attention, Lines: 30, Source: train.py


Note: The torch.Variable examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.