

Python torch.reshape Method Code Examples

This article collects typical usage examples of the torch.reshape method in Python. If you are wondering what torch.reshape does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the torch module that this method belongs to.


Fifteen code examples of the torch.reshape method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: _fspecial_gauss

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
# This snippet also uses numpy: import numpy as np
def _fspecial_gauss(window_size, sigma=1.5):
    # Function to mimic the 'fspecial' gaussian MATLAB function.
    coords = np.arange(0, window_size, dtype=np.float32)
    coords -= (window_size - 1) / 2.0

    g = coords ** 2
    g *= (-0.5 / (sigma ** 2))
    g = np.reshape(g, (1, -1)) + np.reshape(g, (-1, 1))
    g = torch.from_numpy(np.reshape(g, (1, -1)))
    g = torch.softmax(g, dim=1)  # softmax turns the scaled squared distances into a normalized Gaussian kernel
    g = g / g.sum()              # already sums to 1 after softmax; kept as a safeguard
    return g


# 2019.05.26. butterworth filter.
# ref: http://www.cnblogs.com/laumians-notes/p/8592968.html 
Developer: tensorboy | Project: centerpose | Lines of code: 18 | Source file: losses.py
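
A quick shape check of the kernel produced above. This is a minimal sketch; it assumes the _fspecial_gauss definition from this example plus numpy and torch imports:

import numpy as np
import torch

kernel = _fspecial_gauss(window_size=11, sigma=1.5)
print(kernel.shape)         # torch.Size([1, 121]) -- a flattened 11x11 Gaussian window
print(float(kernel.sum()))  # 1.0 -- torch.softmax already normalizes the kernel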

Example 2: create_window

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def create_window(window_size, channel=3, sigma=1.5, gauss='original', n=2):
    if gauss == 'original':
        _1D_window = gaussian(window_size, sigma).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window
    elif gauss == 'butterworth':
        _1D_window = butterworth(window_size, sigma, n).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window
    else:
        g = _fspecial_gauss(window_size, sigma)
        g = torch.reshape(g, (1, 1, window_size, window_size))
        # 2019.06.05.
        # https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853
        g = tile(g, 0, 3)
        return g 
Developer: tensorboy | Project: centerpose | Lines of code: 20 | Source file: losses.py

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, x, state):
        c, h = state

        gates = self.gates(torch.cat([x, h], 1))

        if self.layer_norm is not None:
            combined = self.layer_norm(
                torch.reshape(gates, [-1, 4, self.output_size]))
        else:
            combined = torch.reshape(gates, [-1, 4, self.output_size])

        i, j, f, o = torch.unbind(combined, 1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)

        new_c = f * c + i * torch.tanh(j)

        if self.activation is None:
            # Do not use tanh activation
            new_h = o * new_c
        else:
            new_h = o * self.activation(new_c)

        return new_h, (new_c, new_h) 
Developer: XMUNLP | Project: Tagger | Lines of code: 25 | Source file: recurrent.py
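
For intuition, a tiny shape check of the reshape-and-unbind pattern used above; the sizes are illustrative, not taken from the Tagger project:

import torch

output_size = 8
gates = torch.randn(2, 4 * output_size)                # [batch, 4 * hidden] concatenated gate pre-activations
combined = torch.reshape(gates, [-1, 4, output_size])  # [batch, 4, hidden]
i, j, f, o = torch.unbind(combined, 1)                 # four [batch, hidden] tensors
print(combined.shape, i.shape)                         # torch.Size([2, 4, 8]) torch.Size([2, 8])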

Example 4: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, x):
        x_shape = list(x.shape)
        if(len(x_shape) == 5):
          [N, C, D, H, W] = x_shape
          new_shape = [N*D, C, H, W]
          x = torch.transpose(x, 1, 2)
          x = torch.reshape(x, new_shape)
        x0 = self.in_conv(x)
        x1 = self.down1(x0)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        
        x = self.up1(x4, x3)
        x = self.up2(x, x2)
        x = self.up3(x, x1)
        x = self.up4(x, x0)
        output = self.out_conv(x)

        if(len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
        return output 
Developer: HiLab-git | Project: PyMIC | Lines of code: 26 | Source file: my_net2d.py
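
The key trick above is folding the depth axis into the batch axis so a 2D network can process a 3D volume slice by slice. A minimal sketch of just that reshape, with illustrative sizes:

import torch

N, C, D, H, W = 2, 3, 4, 8, 8
x = torch.randn(N, C, D, H, W)
x = torch.transpose(x, 1, 2)              # [N, D, C, H, W]
x = torch.reshape(x, [N * D, C, H, W])    # fold depth into the batch dimension
print(x.shape)                            # torch.Size([8, 3, 8, 8])
# torch.reshape copies when needed, so it accepts the non-contiguous result of
# transpose, which Tensor.view would reject.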

Example 5: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, x):
        x_shape = list(x.shape)
        if(self.dim == 2 and len(x_shape) == 5):
            [N, C, D, H, W] = x_shape
            new_shape = [N*D, C, H, W]
            x = torch.transpose(x, 1, 2)
            x = torch.reshape(x, new_shape)
        output = self.conv(x)
        if(self.downsample):
            output_d = self.down_layer(output)
        else:
            output_d = None 
        if(self.dim == 2 and len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
            if(self.downsample):
                new_shape = [N, D] + list(output_d.shape)[1:]
                output_d = torch.reshape(output_d, new_shape)
                output_d = torch.transpose(output_d, 1, 2)

        return output, output_d 
Developer: HiLab-git | Project: PyMIC | Lines of code: 24 | Source file: unet2d5.py

Example 6: reshape_prediction_and_ground_truth

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def reshape_prediction_and_ground_truth(predict, soft_y):
    """
    reshape input variables of shape [B, C, D, H, W] to [voxel_n, C]
    """
    tensor_dim = len(predict.size())
    num_class  = list(predict.size())[1]
    if(tensor_dim == 5):
        soft_y  = soft_y.permute(0, 2, 3, 4, 1)
        predict = predict.permute(0, 2, 3, 4, 1)
    elif(tensor_dim == 4):
        soft_y  = soft_y.permute(0, 2, 3, 1)
        predict = predict.permute(0, 2, 3, 1)
    else:
        raise ValueError("{0:}D tensor not supported".format(tensor_dim))
    
    predict = torch.reshape(predict, (-1, num_class)) 
    soft_y  = torch.reshape(soft_y,  (-1, num_class))
      
    return predict, soft_y 
Developer: HiLab-git | Project: PyMIC | Lines of code: 21 | Source file: loss.py
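
A minimal usage sketch, assuming the function above is in scope; the shapes are illustrative:

import torch

predict = torch.rand(2, 4, 8, 16, 16)   # [B, C, D, H, W] with C = 4 classes
soft_y  = torch.rand(2, 4, 8, 16, 16)
predict, soft_y = reshape_prediction_and_ground_truth(predict, soft_y)
print(predict.shape, soft_y.shape)      # torch.Size([4096, 4]) twice, i.e. [B*D*H*W, C]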

Example 7: photometricLossgray

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def photometricLossgray(colorImg_gray, depthImg, albedoImg_gray, 
                        mask, lighting_est, device, K, thres):
    
    N,C,H,W = colorImg_gray.size()
    
    # color loss
    normals, _ = lighting.depthToNormalBatch(depthImg, device, K, thres)
    SHs     = lighting.normalToSHBatch(normals,device)
    
    SHs    = torch.reshape(SHs, (N, H*W, 9))
    lighting_est = torch.reshape(lighting_est, (N, 9, 1))
    
    # SHs: [N, H*W, 9], lighting: [N, 9, 1] -> bmm gives [N, H*W, 1], reshaped to [N, H, W]
    color_shading = torch.bmm(SHs, lighting_est)  # N x H*W x 1
    color_shading = torch.reshape(color_shading, (N, H, W))
    
    mask1 = torch.reshape(mask[:,0,:,:], (N,H,W)) # one layer mask
    color_pre  = mask1 * (color_shading * albedoImg_gray) # N*H*W
    colorImg_gray_mask = mask1 * colorImg_gray # mask
    
    colorloss = F.l1_loss(color_pre, colorImg_gray_mask) # NHW size directly
        
    return colorloss, color_pre

# comes from hmr-src/util/image.py 
Developer: zhuhao-nju | Project: hmd | Lines of code: 27 | Source file: utility.py
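
The reshapes above exist to put the per-pixel SH basis and the per-image lighting vector into shapes that torch.bmm accepts. A minimal sketch of just that step, with random values and illustrative sizes:

import torch

N, H, W = 2, 4, 4
SHs = torch.randn(N, H * W, 9)              # per-pixel spherical-harmonics basis
lighting_est = torch.randn(N, 9, 1)         # per-image SH lighting coefficients
shading = torch.bmm(SHs, lighting_est)      # [N, H*W, 1]
shading = torch.reshape(shading, (N, H, W))
print(shading.shape)                        # torch.Size([2, 4, 4])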

Example 8: decode_ord

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def decode_ord(self, y):
        batch_size, prob, height, width = y.shape
        y = torch.reshape(y, (batch_size, prob//2, 2, height, width))
        denominator = torch.sum(torch.exp(y), 2)
        pred_score = torch.div(torch.exp(y[:, :, 1, :, :]), denominator)
        return pred_score 
Developer: miraiaroha | Project: ACAN | Lines of code: 8 | Source file: model.py
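
The reshape here implements a pairwise softmax for ordinal regression: each ordinal threshold carries two logits, and the reshape groups them so the division acts on each pair. A standalone sketch of the same computation, with illustrative sizes:

import torch

B, K, H, W = 1, 40, 32, 32
y = torch.randn(B, 2 * K, H, W)                 # two logits per ordinal threshold
y = torch.reshape(y, (B, K, 2, H, W))           # pair up the logits
denominator = torch.sum(torch.exp(y), 2)
pred_score = torch.exp(y[:, :, 1, :, :]) / denominator  # probability of the "greater" class
print(pred_score.shape)                         # torch.Size([1, 40, 32, 32])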

Example 9: group_std

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def group_std(x: torch.Tensor, groups=32, eps=1e-5):
    n, c, h, w = x.size()
    x = torch.reshape(x, (n, groups, c // groups, h, w))
    var = torch.var(x, dim=(2, 3, 4), keepdim=True)
    std = torch.sqrt(var + eps)
    # expand the per-group std over its channels and spatial positions so it can be
    # reshaped back to the original (n, c, h, w) layout
    return torch.reshape(std.expand_as(x), (n, c, h, w)) 
Developer: PistonY | Project: torch-toolbox | Lines of code: 8 | Source file: functional.py
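
A quick usage check of the helper above; the channel count must be divisible by groups:

import torch

x = torch.randn(4, 64, 8, 8)
std = group_std(x, groups=32)   # one std per group of 64/32 = 2 channels, broadcast to every position
print(std.shape)                # torch.Size([4, 64, 8, 8])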

Example 10: tile

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def tile(x, n):
    if not utils.is_positive_int(n):
        raise TypeError('Argument \'n\' must be a positive integer.')
    x_ = x.reshape(-1)
    x_ = x_.repeat(n)
    x_ = x_.reshape(n, -1)
    x_ = x_.transpose(1, 0)
    x_ = x_.reshape(-1)
    return x_ 
Developer: bayesiains | Project: nsf | Lines of code: 11 | Source file: torchutils.py
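
A small example of what this tile does: each element is repeated n times in place. It assumes the function above is in scope along with the utils.is_positive_int helper from the same repository:

import torch

x = torch.tensor([1, 2, 3])
print(tile(x, 2))   # tensor([1, 1, 2, 2, 3, 3])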

Example 11: split_leading_dim

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def split_leading_dim(x, shape):
    """Reshapes the leading dim of `x` to have the given shape."""
    new_shape = torch.Size(shape) + x.shape[1:]
    return torch.reshape(x, new_shape) 
Developer: bayesiains | Project: nsf | Lines of code: 6 | Source file: torchutils.py
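
A quick shape check, assuming the function above is in scope:

import torch

x = torch.randn(6, 5)
print(split_leading_dim(x, shape=[2, 3]).shape)   # torch.Size([2, 3, 5])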

Example 12: merge_leading_dims

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def merge_leading_dims(x, num_dims):
    """Reshapes the tensor `x` such that the first `num_dims` dimensions are merged to one."""
    if not utils.is_positive_int(num_dims):
        raise TypeError('Number of leading dims must be a positive integer.')
    if num_dims > x.dim():
        raise ValueError('Number of leading dims can\'t be greater than total number of dims.')
    new_shape = torch.Size([-1]) + x.shape[num_dims:]
    return torch.reshape(x, new_shape) 
Developer: bayesiains | Project: nsf | Lines of code: 10 | Source file: torchutils.py
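
The inverse of the previous example; a quick shape check, assuming the function above and its utils helper are importable:

import torch

x = torch.randn(2, 3, 5)
print(merge_leading_dims(x, num_dims=2).shape)    # torch.Size([6, 5])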

Example 13: forward

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def forward(self, representation_dict):
        """
        Forward pass through adaptation network. Returns classification parameters for task.
        :param representation_dict: (dict::torch.tensors) Dictionary containing class-level representations for each
                                    class in the task.
        :return: (dict::torch.tensors) Dictionary containing the weights and biases for the classification of each class
                 in the task. Model can extract parameters and build the classifier accordingly. Supports sampling if
                 ML-PIP objective is desired.
        """
        classifier_param_dict = {}
        class_weight_means = []
        class_bias_means = []

        # Extract and sort the label set for the task
        label_set = list(representation_dict.keys())
        label_set.sort()
        num_classes = len(label_set)

        # For each class, extract the representation and pass it through adaptation network to generate classification
        # params for that class. Store parameters in a list,
        for class_num in label_set:
            nu = representation_dict[class_num]
            class_weight_means.append(self.weight_means_processor(nu))
            class_bias_means.append(self.bias_means_processor(nu))

        # Save the parameters as torch tensors (matrix and vector) and add to dictionary
        classifier_param_dict['weight_mean'] = torch.cat(class_weight_means, dim=0)
        classifier_param_dict['bias_mean'] = torch.reshape(torch.cat(class_bias_means, dim=1), [num_classes, ])

        return classifier_param_dict 
Developer: cambridge-mlg | Project: cnaps | Lines of code: 32 | Source file: adaptation_networks.py

Example 14: _extract_class_indices

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def _extract_class_indices(labels, which_class):
        """
        Helper method to extract the indices of elements which have the specified label.
        :param labels: (torch.tensor) Labels of the context set.
        :param which_class: Label for which indices are extracted.
        :return: (torch.tensor) Indices in the form of a mask that indicate the locations of the specified label.
        """
        class_mask = torch.eq(labels, which_class)  # binary mask of labels equal to which_class
        class_mask_indices = torch.nonzero(class_mask)  # indices of labels equal to which class
        return torch.reshape(class_mask_indices, (-1,))  # reshape to be a 1D vector 
Developer: cambridge-mlg | Project: cnaps | Lines of code: 12 | Source file: model.py
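
A tiny usage sketch, treating the snippet above as a standalone function; the labels are made up for illustration:

import torch

labels = torch.tensor([0, 2, 1, 2, 0])
print(_extract_class_indices(labels, which_class=2))   # tensor([1, 3]) -- positions holding label 2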

Example 15: train

# Required module: import torch [as alias]
# Or: from torch import reshape [as alias]
def train(model, trainloader, trainset, epoch, num_epochs, batch_size, lr, use_cuda, in_shape):
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(model.parameters(), lr=learning_rate(lr, epoch), momentum=0.9, weight_decay=5e-4)

    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])

    print('|  Number of Trainable Parameters: ' + str(params))
    print('\n=> Training Epoch #%d, LR=%.4f' % (epoch, learning_rate(lr, epoch)))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()  # GPU settings
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        out, out_bij = model(inputs)               # Forward Propagation
        loss = criterion(out, targets)  # Loss
        loss.backward()  # Backward Propagation
        optimizer.step()  # Optimizer update

        try:
            loss.data[0]
        except IndexError:
            loss.data = torch.reshape(loss.data, (1,))
        train_loss += loss.data[0]
        _, predicted = torch.max(out.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                         % (epoch, num_epochs, batch_idx+1,
                            (len(trainset)//batch_size)+1, loss.data[0], 100.*correct/total))
        sys.stdout.flush() 
Developer: jhjacobsen | Project: pytorch-i-revnet | Lines of code: 38 | Source file: utils_cifar.py


Note: The torch.reshape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when redistributing or using this code; do not republish without permission.