

Python torch.Variable Method Code Examples

This article collects typical usage examples of the Python method torch.Variable. If you are wondering what torch.Variable does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the torch package.


The 15 code examples of torch.Variable below are ordered by popularity.

Example 1: get_dropout_mask

# Required import: import torch
# Alternatively: from torch import Variable
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.autograd.Variable, required.
        Tensor whose size and CUDA type the returned mask should match.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability).
    This scaling ensures the expected value and variance of the masked tensor
    match those of the original tensor.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask 
Author: rowanz | Project: neural-motifs | Lines: 27 | Source: decoder_rnn.py
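A minimal usage sketch (assuming PyTorch >= 0.4, where Variable is an alias of Tensor; the hidden state here is a toy stand-in, not from the project):

import torch

hidden = torch.randn(4, 256)              # hypothetical LSTM hidden state
mask = get_dropout_mask(0.3, hidden)
dropped_hidden = hidden * mask            # the mask is applied manually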

Example 2: train

# Required import: import torch
# Alternatively: from torch import Variable
def train(net, criterion, optimizer, train_iter):
    # image, text, length and converter are module-level buffers in the
    # original project (train.py); loadData copies batch data into them.
    for p in net.parameters():
        p.requires_grad = True
    net.train()

    data = next(train_iter)
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)

    optimizer.zero_grad()
    preds = net(image)
    preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    cost.backward()
    optimizer.step()
    return cost
Author: Holmeyoung | Project: crnn-pytorch | Lines: 23 | Source: train.py
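A hedged sketch of the loop that typically drives this function; crnn, criterion, optimizer and train_loader follow the original project's names but are assumptions here:

train_iter = iter(train_loader)
for i in range(len(train_loader)):
    cost = train(crnn, criterion, optimizer, train_iter)
    if i % 100 == 0:
        print('batch %d: loss %.4f' % (i, cost.item()))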

Example 3: get_dropout_mask

# Required import: import torch
# Alternatively: from torch import Variable
def get_dropout_mask(dropout_probability: float, h_dim: int):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.


    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
     and the original tensor are the same.
    """
    binary_mask = Variable(torch.FloatTensor(h_dim).cuda().fill_(0.0))
    binary_mask.data.copy_(torch.rand(h_dim) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask 
Author: KaihuaTang | Project: VCTree-Scene-Graph-Generation | Lines: 27 | Source: decoder_tree_lstm.py

Example 4: forward

# Required import: import torch
# Alternatively: from torch import Variable
def forward(self, forest, features, num_obj):
        # calc dropout mask, same for all
        if self.dropout > 0.0:
            dropout_mask = get_dropout_mask(self.dropout, self.out_dim)
        else:
            dropout_mask = None

        # tree lstm input
        out_h = None
        h_order = Variable(torch.LongTensor(num_obj).zero_().cuda())  # used to restore the input order
        order_idx = 0
        lstm_io = tree_utils.TreeLSTM_IO(out_h, h_order, order_idx, None, None, dropout_mask)
        # run tree lstm forward (leaves to root)
        for idx in range(len(forest)):
            self.treeLSTM(forest[idx], features, lstm_io)
        
        # restore the output to the original input order
        output = torch.index_select(lstm_io.hidden, 0, lstm_io.order.long())
        return output 
Author: KaihuaTang | Project: VCTree-Scene-Graph-Generation | Lines: 21 | Source: tree_lstm.py
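The order-restoring step at the end is a plain index_select over the recorded positions; a self-contained toy illustration (values are made up, not from the project):

import torch

# hidden row k holds the state of the object processed at tree step k;
# objects 0, 1, 2 were visited at steps 2, 0, 1 respectively.
hidden = torch.tensor([[10.], [20.], [0.]])
order = torch.tensor([2, 0, 1])                 # order[obj] = step index

restored = torch.index_select(hidden, 0, order)
print(restored)                                 # tensor([[0.], [10.], [20.]])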

Example 5: calc_gradient_penalty

# Required import: import torch
# Alternatively: from torch import Variable
def calc_gradient_penalty(netD, real_data, fake_data, device='cpu', pac=10, lambda_=10):
    alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device)
    alpha = alpha.repeat(1, pac, real_data.size(1))
    alpha = alpha.view(-1, real_data.size(1))

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    # No Variable wrapper is needed in PyTorch >= 0.4: interpolates already
    # requires grad as long as fake_data is attached to the generator graph.

    disc_interpolates = netD(interpolates)

    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size(), device=device),
        create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = (
        (gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1) ** 2).mean() * lambda_
    return gradient_penalty 
Author: sdv-dev | Project: SDGym | Lines: 21 | Source: ctgan.py
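A hedged sketch of how the penalty usually enters a critic update in WGAN-GP training; netD, netG, real, noise and optimizerD are assumptions, and pac=1 keeps the shapes generic:

fake = netG(noise)
optimizerD.zero_grad()
loss_d = -(netD(real).mean() - netD(fake.detach()).mean())
penalty = calc_gradient_penalty(netD, real, fake, device='cpu', pac=1)
(loss_d + penalty).backward()
optimizerD.step()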

Example 6: get_activation_wts

# Required import: import torch
# Alternatively: from torch import Variable
def get_activation_wts(attention_model, x):
    """
        Get the r attention heads.

        Args:
            attention_model : {object} model
            x               : {torch.Variable} input whose attention weights we want

        Returns:
            r different attention weights
    """
    attention_model.batch_size = x.size(0)
    attention_model.hidden_state = attention_model.init_hidden()
    _, wts = attention_model(x)
    return wts
Author: kaushalshetty | Project: Structured-Self-Attention | Lines: 19 | Source: train.py

Example 7: forward

# Required import: import torch
# Alternatively: from torch import Variable
def forward(self, x):
        """
        Takes a batch of signals and convoles each signal with all elements in the filter
        bank. After convoling the entire filter bank, the method returns a tensor of
        shape [N,N_scales,1/2,T] where the 1/2 number of channels depends on whether
        the filter bank is composed of real or complex filters. If the filters are
        complex the 2 channels represent [real, imag] parts.

        :param x: torch.Variable, batch of input signals of shape [N,1,T]
        :return: torch.Variable, batch of outputs of size [N,N_scales,1/2,T]
        """

        if not self._filters:
            raise ValueError('PyTorch filters not initialized. Please call set_filters() first.')
            return None
        results = [None]*len(self._filters)
        for ind, conv in enumerate(self._filters):
            results[ind] = conv(x)
        results = torch.stack(results)     # [n_scales,n_batch,2,t]
        results = results.permute(1,0,2,3) # [n_batch,n_scales,2,t]
        return results 
Author: tomrunia | Project: PyTorchWavelets | Lines: 23 | Source: network.py

Example 8: add

# Required import: import torch
# Alternatively: from torch import Variable
def add(self, v):
        if isinstance(v, Variable):
            count = v.data.numel()
            v = v.data.sum()
        elif isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()
        else:
            # Plain Python number: count it as a single value.
            count = 1

        self.n_count += count
        self.sum += v
Author: zzzDavid | Project: ICDAR-2019-SROIE | Lines: 12 | Source: utils.py
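In the source project this add method belongs to a running loss averager; a hedged usage sketch (the averager class name and its val() accessor follow common CRNN utilities and are assumptions here):

import torch

loss_avg = averager()
for _ in range(10):
    loss_avg.add(torch.rand(4))   # accumulates element count and sum
print(loss_avg.val())             # mean over everything added so far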

Example 9: asImg

# Required import: import torch
# Alternatively: from torch import Variable
def asImg(tensor, size = None):
    """
        This function provides fast approach to transfer the image into numpy.ndarray
        This function only accept the output from sigmoid layer or hyperbolic tangent output

        Arg:    tensor  - The torch.Variable object, the rank format is BCHW or BHW
                size    - The tuple object, and the format is (height, width)
        Ret:    The numpy image, the rank format is BHWC
    """
    global channel_op
    result = tensor.detach()

    # 1. Judge the rank first
    if len(tensor.size()) == 3:
        result = torch.stack([result, result, result], 1)

    # 2. Judge the range of tensor (sigmoid output or hyperbolic tangent output)
    min_v = torch.min(result).cpu().data.numpy()
    max_v = torch.max(result).cpu().data.numpy()
    if max_v > 1.0 or min_v < -1.0:
        raise Exception('tensor value out of range...\t range is [' + str(min_v) + ' ~ ' + str(max_v))
    if min_v < 0:
        result = (result + 1) / 2

    # 3. Define the BCHW -> BHWC operation
    if channel_op is None:
        channel_op = Transpose(BCHW2BHWC)

    # 4. Transpose back, rescale to [0, 255] and resize if requested
    result = channel_op(result)
    result = result.cpu().data.numpy()
    if size is not None:
        result_list = []
        for img in result:
            result_list.append(transform.resize(img, (size[0], size[1]), mode = 'constant', order = 0) * 255)
        result = np.stack(result_list, axis = 0)
    else:
        result *= 255.
    result = result.astype(np.uint8)
    return result 
Author: tomguluson92 | Project: StyleGAN2_PyTorch | Lines: 42 | Source: function.py
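A hedged sketch of converting a tanh-range batch to uint8 numpy images (the Transpose/BCHW2BHWC helpers are assumed to be in scope, as in the project):

import torch

fake_batch = torch.tanh(torch.randn(4, 3, 64, 64))   # values in [-1, 1]
imgs = asImg(fake_batch, size=(128, 128))             # uint8, shape [4, 128, 128, 3]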

Example 10: predictor_loss_function

# Required import: import torch
# Alternatively: from torch import Variable
def predictor_loss_function(self, prediction, target, *args, **kwargs):
        """Pure abstract method that computes the loss.

        Args:
            prediction: Prediction that was made by the model of shape
                        [BATCH_SIZE, N_LABELS]
            target: Expected result of shape [BATCH_SIZE, N_OUTPUT_TOKENS]
        Returns:
            loss: This method should return the loss as a Tensor or Variable.
        """
        return torch.tensor(float("inf"))
Author: pytorch | Project: translate | Lines: 13 | Source: word_prediction_criterion.py

Example 11: analyse

# Required import: import torch
# Alternatively: from torch import Variable
def analyse(net, inputs):
    """
    analyse the network given input
    :param net: torch.nn.Module
    :param inputs: torch.Variable, torch.Tensor or list of them
    :return: blob_dict, tracked_layers
    """
    del tracked_layers[:]
    del blob_dict[:]
    if not isinstance(inputs, (list, tuple)):
        raw_inputs=[inputs]
    else:
        raw_inputs=inputs
    _inputs=[]
    for name,layer in net.named_modules():
        layer_name_dict[layer]=name
    for i in raw_inputs:
        if isinstance(i,Variable):
            _inputs.append(i)
        elif isinstance(i,torch.Tensor):
            _inputs.append(Variable(i))
        elif isinstance(i,np.ndarray):
            _inputs.append(Variable(torch.Tensor(i)))
        else:
            raise NotImplementedError("Not Support the input type {}".format(type(i)))
    net.apply(register)
    net.forward(*_inputs)
    return blob_dict,tracked_layers 
Author: xxradon | Project: PytorchToCaffe | Lines: 28 | Source: PytorchA.py

Example 12: tovar

# Required import: import torch
# Alternatively: from torch import Variable
def tovar(x, cuda):
    # Wrap a numpy array as a FloatTensor Variable, on GPU when cuda is True.
    if cuda:
        return Variable(torch.FloatTensor(x).cuda())
    else:
        return Variable(torch.FloatTensor(x.astype(np.float64)))
Author: Orkis-Research | Project: Pytorch-Quaternion-Neural-Networks | Lines: 7 | Source: copy_task.py
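A quick usage sketch; cuda=False keeps it runnable on CPU (torch, Variable and numpy are assumed imported, as in the project):

import numpy as np

x = np.random.randn(8, 16)
v = tovar(x, cuda=False)   # CPU FloatTensor wrapped in a Variable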

Example 13: analyse

# Required import: import torch
# Alternatively: from torch import Variable
def analyse(net, inputs):
    """
    analyse the network given input
    :param net: torch.nn.Module
    :param inputs: torch.Variable, torch.Tensor or list of them
    :return: blob_dict, tracked_layers
    """
    del tracked_layers[:]
    del blob_dict[:]
    if not isinstance(inputs,(list,tuple)):
        raw_inputs=[inputs]
    else:
        raw_inputs=inputs
    _inputs=[]
    for name,layer in net.named_modules():
        layer_name_dict[layer]=name
    for i in raw_inputs:
        if isinstance(i,Variable):
            _inputs.append(i)
        elif isinstance(i,torch.Tensor):
            _inputs.append(Variable(i))
        elif isinstance(i,np.ndarray):
            _inputs.append(Variable(torch.Tensor(i)))
        else:
            raise NotImplementedError("Not Support the input type {}".format(type(i)))
    net.apply(register)
    net.forward(*_inputs)
    for _,m in net.named_modules():
        m._forward_hooks.clear()
    print_by_layers(tracked_layers)
    return blob_dict,tracked_layers 
Author: hahnyuan | Project: nn_tools | Lines: 33 | Source: PytorchA.py
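A hedged sketch of analysing a small network; torchvision's resnet18 is used purely as an example and is not part of the project:

import torch
import torchvision.models as models

net = models.resnet18()
blob_dict, tracked_layers = analyse(net, torch.randn(1, 3, 224, 224))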

Example 14: __define_variable

# Required import: import torch
# Alternatively: from torch import Variable
def __define_variable(self, tensor, volatile=False):
        # volatile was removed in PyTorch 0.4; no_grad() is its replacement,
        # though it only suppresses grad tracking for ops run inside the block.
        if volatile:
            with torch.no_grad():
                return Variable(tensor)

        return Variable(tensor)
Author: Wizaron | Project: instance-segmentation-pytorch | Lines: 8 | Source: model.py
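In PyTorch >= 0.4, Variable is a deprecated alias of Tensor and volatile is gone, so a modern equivalent is simply (a sketch, not the project's code):

import torch

def define_tensor(tensor, volatile=False):
    # detach() returns a tensor outside the autograd graph, which mirrors
    # the old volatile=True intent; otherwise the tensor is returned as-is.
    return tensor.detach() if volatile else tensor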

Example 15: evaluate

# Required import: import torch
# Alternatively: from torch import Variable
def evaluate(attention_model,x_test,y_test):
    """
        cv results
 
        Args:
            attention_model : {object} model
            x_test          : {nplist} x_test
            y_test          : {nplist} y_test
       
        Returns:
            cv-accuracy
 
      
    """
   
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()
    x_test_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_test_pred,_ = attention_model(x_test_var)
    if bool(attention_model.type):
        y_preds = torch.max(y_test_pred,1)[1]
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
       
    else:
        y_preds = torch.round(y_test_pred.type(torch.DoubleTensor).squeeze(1))
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))
       
    return torch.eq(y_preds,y_test_var).data.sum()/x_test_var.size(0) 
Author: kaushalshetty | Project: Structured-Self-Attention | Lines: 30 | Source: train.py
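A hedged sketch of running the evaluation; attention_model and the random arrays are placeholders whose shapes follow the function's expectations:

import numpy as np

x_test = np.random.randint(0, 1000, size=(32, 50))   # token id matrix
y_test = np.random.randint(0, 2, size=(32,))
acc = evaluate(attention_model, x_test, y_test)
print('cv accuracy:', acc)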


Note: The torch.Variable examples in this article were collected from open-source projects on GitHub and similar platforms. The code snippets are copyrighted by their original authors; consult each project's License before using or redistributing them.