

Python torch.reshape Method Code Examples

This article collects typical code examples of the Python torch.reshape method. If you are unsure what torch.reshape does, how to call it, or how it is used in practice, the examples selected below may help. You can also explore further usage examples from the torch package.


The following presents 15 code examples of the torch.reshape method, sorted by popularity by default.
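
Before the examples, here is a minimal sketch (not taken from any of the projects below) showing the basic call: torch.reshape takes a tensor and a target shape, where a single -1 lets PyTorch infer that dimension.

import torch

x = torch.arange(12)              # shape: (12,)
y = torch.reshape(x, (3, 4))      # shape: (3, 4)
z = torch.reshape(x, (-1, 6))     # -1 is inferred, giving shape (2, 6)
print(y.shape, z.shape)           # torch.Size([3, 4]) torch.Size([2, 6])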

Example 1: _fspecial_gauss

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def _fspecial_gauss(window_size, sigma=1.5):
    # Function to mimic the 'fspecial' gaussian MATLAB function.
    coords = np.arange(0, window_size, dtype=np.float32)
    coords -= (window_size - 1) / 2.0

    g = coords ** 2
    g *= (-0.5 / (sigma ** 2))
    g = np.reshape(g, (1, -1)) + np.reshape(g, (-1, 1))
    g = torch.from_numpy(np.reshape(g, (1, -1)))
    g = torch.softmax(g, dim=1)  # softmax of -d^2 / (2*sigma^2) yields a normalized Gaussian
    g = g / g.sum()              # already sums to 1; kept as an explicit safeguard
    return g


# 2019.05.26. butterworth filter.
# ref: http://www.cnblogs.com/laumians-notes/p/8592968.html 
Developer: tensorboy, Project: centerpose, Lines of code: 18, Source: losses.py
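
As a quick sanity check on Example 1 (a hypothetical usage sketch, not part of the original file, assuming numpy as np and torch are imported as the snippet's header comments indicate): the function returns a flattened, normalized Gaussian kernel.

g = _fspecial_gauss(window_size=11, sigma=1.5)
print(g.shape)                    # torch.Size([1, 121]), a flattened 11x11 kernel
print(float(g.sum()))             # 1.0, since softmax already normalizes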

Example 2: create_window

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def create_window(window_size, channel=3, sigma=1.5, gauss='original', n=2):
    if gauss == 'original':
        _1D_window = gaussian(window_size, sigma).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window
    elif gauss == 'butterworth':
        _1D_window = butterworth(window_size, sigma, n).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window
    else:
        g = _fspecial_gauss(window_size, sigma)
        g = torch.reshape(g, (1, 1, window_size, window_size))
        # 2019.06.05.
        # https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853
        g = tile(g, 0, 3)
        return g 
Developer: tensorboy, Project: centerpose, Lines of code: 20, Source: losses.py
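
For the final branch above, the reshaped kernel can be used directly as a depthwise convolution weight. Below is a hedged sketch that replaces the repository's tile helper with expand, so it only needs _fspecial_gauss from Example 1:

import torch
import torch.nn.functional as F

window_size, channel = 11, 3
g = _fspecial_gauss(window_size, sigma=1.5)                     # (1, 121)
g = torch.reshape(g, (1, 1, window_size, window_size))          # conv2d weight layout
g = g.expand(channel, 1, window_size, window_size).contiguous() # one kernel per channel

img = torch.rand(1, channel, 64, 64)
blurred = F.conv2d(img, g, padding=window_size // 2, groups=channel)
print(blurred.shape)              # torch.Size([1, 3, 64, 64])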

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, x, state):
        c, h = state

        gates = self.gates(torch.cat([x, h], 1))

        if self.layer_norm is not None:
            combined = self.layer_norm(
                torch.reshape(gates, [-1, 4, self.output_size]))
        else:
            combined = torch.reshape(gates, [-1, 4, self.output_size])

        i, j, f, o = torch.unbind(combined, 1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)

        new_c = f * c + i * torch.tanh(j)

        if self.activation is None:
            # Do not use tanh activation
            new_h = o * new_c
        else:
            new_h = o * self.activation(new_c)

        return new_h, (new_c, new_h) 
Developer: XMUNLP, Project: Tagger, Lines of code: 25, Source: recurrent.py
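
The reshape-then-unbind pattern in Example 3 can be shown in isolation with made-up sizes (batch 4, output_size 8); this sketch only mirrors the gate-splitting step:

import torch

batch, output_size = 4, 8
gates = torch.randn(batch, 4 * output_size)             # concatenated i, j, f, o projections
combined = torch.reshape(gates, [-1, 4, output_size])   # (4, 4, 8)
i, j, f, o = torch.unbind(combined, 1)                  # four tensors of shape (4, 8)
print(i.shape, j.shape, f.shape, o.shape)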

Example 4: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, x):
        x_shape = list(x.shape)
        if(len(x_shape) == 5):
            [N, C, D, H, W] = x_shape
            new_shape = [N*D, C, H, W]
            x = torch.transpose(x, 1, 2)
            x = torch.reshape(x, new_shape)
        x0 = self.in_conv(x)
        x1 = self.down1(x0)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        
        x = self.up1(x4, x3)
        x = self.up2(x, x2)
        x = self.up3(x, x1)
        x = self.up4(x, x0)
        output = self.out_conv(x)

        if(len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
        return output 
Developer: HiLab-git, Project: PyMIC, Lines of code: 26, Source: my_net2d.py
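
The transpose/reshape round trip in Example 4 folds the depth dimension into the batch dimension so a 2D network can process a 3D volume, then restores it; a standalone sketch with arbitrary sizes:

import torch

N, C, D, H, W = 2, 3, 5, 16, 16
x = torch.randn(N, C, D, H, W)

# fold depth into batch: (N, C, D, H, W) -> (N*D, C, H, W)
x2d = torch.reshape(torch.transpose(x, 1, 2), (N * D, C, H, W))

# ... a 2D network would run here ...

# unfold back: (N*D, C, H, W) -> (N, C, D, H, W)
x5d = torch.transpose(torch.reshape(x2d, (N, D, C, H, W)), 1, 2)
print(torch.equal(x, x5d))        # True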

Example 5: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, x):
        x_shape = list(x.shape)
        if(self.dim == 2 and len(x_shape) == 5):
            [N, C, D, H, W] = x_shape
            new_shape = [N*D, C, H, W]
            x = torch.transpose(x, 1, 2)
            x = torch.reshape(x, new_shape)
        output = self.conv(x)
        if(self.downsample):
            output_d = self.down_layer(output)
        else:
            output_d = None 
        if(self.dim == 2 and len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
            if(self.downsample):
                new_shape = [N, D] + list(output_d.shape)[1:]
                output_d = torch.reshape(output_d, new_shape)
                output_d = torch.transpose(output_d, 1, 2)

        return output, output_d 
Developer: HiLab-git, Project: PyMIC, Lines of code: 24, Source: unet2d5.py

Example 6: reshape_prediction_and_ground_truth

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def reshape_prediction_and_ground_truth(predict, soft_y):
    """
    reshape input variables of shape [B, C, D, H, W] to [voxel_n, C]
    """
    tensor_dim = len(predict.size())
    num_class  = list(predict.size())[1]
    if(tensor_dim == 5):
        soft_y  = soft_y.permute(0, 2, 3, 4, 1)
        predict = predict.permute(0, 2, 3, 4, 1)
    elif(tensor_dim == 4):
        soft_y  = soft_y.permute(0, 2, 3, 1)
        predict = predict.permute(0, 2, 3, 1)
    else:
        raise ValueError("{0:}D tensor not supported".format(tensor_dim))
    
    predict = torch.reshape(predict, (-1, num_class)) 
    soft_y  = torch.reshape(soft_y,  (-1, num_class))
      
    return predict, soft_y 
Developer: HiLab-git, Project: PyMIC, Lines of code: 21, Source: loss.py
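
A shape check for Example 6 (hypothetical sizes): a [B, C, D, H, W] prediction and label volume collapse to [voxel_n, C].

import torch

predict = torch.softmax(torch.randn(2, 4, 8, 16, 16), dim=1)   # [B, C, D, H, W]
soft_y  = torch.rand(2, 4, 8, 16, 16)
p, y = reshape_prediction_and_ground_truth(predict, soft_y)
print(p.shape, y.shape)           # torch.Size([4096, 4]) twice, since 2*8*16*16 = 4096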

Example 7: photometricLossgray

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def photometricLossgray(colorImg_gray, depthImg, albedoImg_gray, 
                        mask, lighting_est, device, K, thres):
    
    N,C,H,W = colorImg_gray.size()
    
    # color loss
    normals, _ = lighting.depthToNormalBatch(depthImg, device, K, thres)
    SHs     = lighting.normalToSHBatch(normals,device)
    
    SHs    = torch.reshape(SHs, (N, H*W, 9))
    lighting_est = torch.reshape(lighting_est, (N, 9, 1))
    
    # SHs: [N, H*W, 9] x lighting: [N, 9, 1] -> shading: [N, H*W, 1], reshaped to [N, H, W]
    color_shading = torch.bmm(SHs, lighting_est) # N H*W 1   
    color_shading = torch.reshape(color_shading, (N, H, W))
    
    mask1 = torch.reshape(mask[:,0,:,:], (N,H,W)) # one layer mask
    color_pre  = mask1 * (color_shading * albedoImg_gray) # N*H*W
    colorImg_gray_mask = mask1 * colorImg_gray # mask
    
    colorloss = F.l1_loss(color_pre, colorImg_gray_mask) # NHW size directly
        
    return colorloss, color_pre

# come from hmr-src/util/image.py 
Developer: zhuhao-nju, Project: hmd, Lines of code: 27, Source: utility.py
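
The core shading step of Example 7 is a reshape followed by a batched matrix multiply; a sketch with random spherical-harmonic inputs (hypothetical sizes, no dependency on the lighting module):

import torch

N, H, W = 2, 32, 32
SHs = torch.randn(N, H, W, 9)                       # per-pixel SH basis
lighting_est = torch.randn(N, 9)                    # per-image SH lighting coefficients

shading = torch.bmm(torch.reshape(SHs, (N, H * W, 9)),
                    torch.reshape(lighting_est, (N, 9, 1)))    # (N, H*W, 1)
shading = torch.reshape(shading, (N, H, W))
print(shading.shape)              # torch.Size([2, 32, 32])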

Example 8: decode_ord

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def decode_ord(self, y):
        batch_size, prob, height, width = y.shape
        y = torch.reshape(y, (batch_size, prob//2, 2, height, width))
        denominator = torch.sum(torch.exp(y), 2)
        pred_score = torch.div(torch.exp(y[:, :, 1, :, :]), denominator)
        return pred_score 
Developer: miraiaroha, Project: ACAN, Lines of code: 8, Source: model.py
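
Example 8's reshape groups every two channels and takes a softmax over each pair; the same arithmetic on a random tensor (hypothetical sizes, no model needed):

import torch

batch_size, prob, height, width = 2, 10, 8, 8       # prob = 2 * number of ordinal bins
y = torch.randn(batch_size, prob, height, width)

y = torch.reshape(y, (batch_size, prob // 2, 2, height, width))
denominator = torch.sum(torch.exp(y), 2)
pred_score = torch.div(torch.exp(y[:, :, 1, :, :]), denominator)
print(pred_score.shape)           # torch.Size([2, 5, 8, 8]), values in (0, 1)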

Example 9: group_std

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def group_std(x: torch.Tensor, groups=32, eps=1e-5):
    n, c, h, w = x.size()
    x = torch.reshape(x, (n, groups, c // groups, h, w))
    var = torch.var(x, dim=(2, 3, 4), keepdim=True)
    std = torch.sqrt(var + eps)
    # broadcast the per-group std over its channels and spatial positions
    # before restoring the original (n, c, h, w) layout
    return torch.reshape(std.expand_as(x), (n, c, h, w))
Developer: PistonY, Project: torch-toolbox, Lines of code: 8, Source: functional.py
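
With the broadcast noted in the snippet's comments, a usage sketch: channels that share a group share one standard deviation.

import torch

x = torch.randn(4, 64, 16, 16)
std = group_std(x, groups=32)
print(std.shape)                                  # torch.Size([4, 64, 16, 16])
print(torch.allclose(std[0, 0], std[0, 1]))       # True: channels 0 and 1 are in the same group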

Example 10: tile

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def tile(x, n):
    if not utils.is_positive_int(n):
        raise TypeError('Argument \'n\' must be a positive integer.')
    x_ = x.reshape(-1)
    x_ = x_.repeat(n)
    x_ = x_.reshape(n, -1)
    x_ = x_.transpose(1, 0)
    x_ = x_.reshape(-1)
    return x_ 
Developer: bayesiains, Project: nsf, Lines of code: 11, Source: torchutils.py
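
A usage sketch for Example 10 (it assumes utils.is_positive_int from the same repository is importable): each element is repeated n times in place, unlike Tensor.repeat, which tiles the whole tensor.

import torch

x = torch.tensor([1, 2, 3])
print(tile(x, 2))                 # tensor([1, 1, 2, 2, 3, 3])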

Example 11: split_leading_dim

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def split_leading_dim(x, shape):
    """Reshapes the leading dim of `x` to have the given shape."""
    new_shape = torch.Size(shape) + x.shape[1:]
    return torch.reshape(x, new_shape) 
Developer: bayesiains, Project: nsf, Lines of code: 6, Source: torchutils.py
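
A usage sketch for Example 11 (hypothetical sizes): the leading dimension is split into the given shape while the remaining dimensions are kept.

import torch

x = torch.randn(6, 4)
print(split_leading_dim(x, (2, 3)).shape)   # torch.Size([2, 3, 4])
print(split_leading_dim(x, (-1, 2)).shape)  # torch.Size([3, 2, 4]), -1 is inferred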

Example 12: merge_leading_dims

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def merge_leading_dims(x, num_dims):
    """Reshapes the tensor `x` such that the first `num_dims` dimensions are merged to one."""
    if not utils.is_positive_int(num_dims):
        raise TypeError('Number of leading dims must be a positive integer.')
    if num_dims > x.dim():
        raise ValueError('Number of leading dims can\'t be greater than total number of dims.')
    new_shape = torch.Size([-1]) + x.shape[num_dims:]
    return torch.reshape(x, new_shape) 
Developer: bayesiains, Project: nsf, Lines of code: 10, Source: torchutils.py
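
A usage sketch for Example 12 (it assumes utils.is_positive_int from the same repository is importable):

import torch

x = torch.randn(2, 3, 4, 5)
print(merge_leading_dims(x, num_dims=2).shape)  # torch.Size([6, 4, 5])
print(merge_leading_dims(x, num_dims=3).shape)  # torch.Size([24, 5])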

Example 13: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, representation_dict):
        """
        Forward pass through adaptation network. Returns classification parameters for task.
        :param representation_dict: (dict::torch.tensors) Dictionary containing class-level representations for each
                                    class in the task.
        :return: (dict::torch.tensors) Dictionary containing the weights and biases for the classification of each class
                 in the task. Model can extract parameters and build the classifier accordingly. Supports sampling if
                 ML-PIP objective is desired.
        """
        classifier_param_dict = {}
        class_weight_means = []
        class_bias_means = []

        # Extract and sort the label set for the task
        label_set = list(representation_dict.keys())
        label_set.sort()
        num_classes = len(label_set)

        # For each class, extract the representation and pass it through adaptation network to generate classification
        # params for that class. Store parameters in a list,
        for class_num in label_set:
            nu = representation_dict[class_num]
            class_weight_means.append(self.weight_means_processor(nu))
            class_bias_means.append(self.bias_means_processor(nu))

        # Save the parameters as torch tensors (matrix and vector) and add to dictionary
        classifier_param_dict['weight_mean'] = torch.cat(class_weight_means, dim=0)
        classifier_param_dict['bias_mean'] = torch.reshape(torch.cat(class_bias_means, dim=1), [num_classes, ])

        return classifier_param_dict 
Developer: cambridge-mlg, Project: cnaps, Lines of code: 32, Source: adaptation_networks.py

Example 14: _extract_class_indices

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def _extract_class_indices(labels, which_class):
        """
        Helper method to extract the indices of elements which have the specified label.
        :param labels: (torch.tensor) Labels of the context set.
        :param which_class: Label for which indices are extracted.
        :return: (torch.tensor) Indices in the form of a mask that indicate the locations of the specified label.
        """
        class_mask = torch.eq(labels, which_class)  # binary mask of labels equal to which_class
        class_mask_indices = torch.nonzero(class_mask)  # indices of labels equal to which class
        return torch.reshape(class_mask_indices, (-1,))  # reshape to be a 1D vector 
Developer: cambridge-mlg, Project: cnaps, Lines of code: 12, Source: model.py
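
A usage sketch for Example 14 (the method is called here as a plain function on a toy label tensor):

import torch

labels = torch.tensor([0, 2, 1, 2, 2, 0])
print(_extract_class_indices(labels, which_class=2))  # tensor([1, 3, 4])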

Example 15: train

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def train(model, trainloader, trainset, epoch, num_epochs, batch_size, lr, use_cuda, in_shape):
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(model.parameters(), lr=learning_rate(lr, epoch), momentum=0.9, weight_decay=5e-4)

    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])

    print('|  Number of Trainable Parameters: ' + str(params))
    print('\n=> Training Epoch #%d, LR=%.4f' % (epoch, learning_rate(lr, epoch)))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()  # GPU settings
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        out, out_bij = model(inputs)               # Forward Propagation
        loss = criterion(out, targets)  # Loss
        loss.backward()  # Backward Propagation
        optimizer.step()  # Optimizer update

        try:
            loss.data[0]
        except IndexError:
            loss.data = torch.reshape(loss.data, (1,))
        train_loss += loss.data[0]
        _, predicted = torch.max(out.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                         % (epoch, num_epochs, batch_idx+1,
                            (len(trainset)//batch_size)+1, loss.data[0], 100.*correct/total))
        sys.stdout.flush() 
Developer: jhjacobsen, Project: pytorch-i-revnet, Lines of code: 38, Source: utils_cifar.py
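
The try/except in Example 15 guards against 0-dim loss tensors returned by newer PyTorch versions, where indexing a scalar raises IndexError; the reshape workaround in isolation:

import torch

loss = torch.tensor(0.25)         # 0-dim scalar; loss[0] raises IndexError
loss = torch.reshape(loss, (1,))  # shape (1,), so loss[0] works again
print(loss[0].item())             # 0.25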


Note: The torch.reshape examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please refer to each project's license before distributing or reusing the code, and do not republish without permission.