

Python torch.unsqueeze Method Code Examples

This article collects typical usage examples of Python's torch.unsqueeze method. If you are wondering what torch.unsqueeze does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples from the torch module that this method belongs to.


The following presents 15 code examples of the torch.unsqueeze method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
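
Before diving in, a minimal sketch of the method itself: torch.unsqueeze(input, dim) returns a new tensor with a size-one dimension inserted at position dim (negative values count from the end), and the Tensor.unsqueeze method form is equivalent. The tensor below is made up for illustration.

import torch

x = torch.tensor([1.0, 2.0, 3.0])     # shape: (3,)
print(torch.unsqueeze(x, 0).shape)    # torch.Size([1, 3]) -- new leading dim
print(torch.unsqueeze(x, 1).shape)    # torch.Size([3, 1]) -- new trailing dim
print(torch.unsqueeze(x, -1).shape)   # torch.Size([3, 1]) -- negative dim counts from the end
print(x.unsqueeze(0).shape)           # torch.Size([1, 3]) -- equivalent method form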

Example 1: m_ggnn

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def m_ggnn(self, h_v, h_w, e_vw, opt={}):

        m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))

        for w in range(h_w.size(1)):
            if torch.nonzero(e_vw[:, w, :].data).numel():  # any non-zero edge labels for this neighbour? (.size() is truthy even when empty)
                for i, el in enumerate(self.args['e_label']):
                    ind = (el == e_vw[:,w,:]).type_as(self.learn_args[0][i])

                    parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0), self.learn_args[0][i].size(0),
                                                                            self.learn_args[0][i].size(1))

                    m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                                        torch.transpose(torch.unsqueeze(h_w[:, w, :], 1),
                                                                                        1, 2)), 1, 2)
                    m_w = torch.squeeze(m_w)
                    m[:,w,:] = ind.expand_as(m_w)*m_w
        return m 
Developer ID: priba, Project: nmp_qc, Lines: 20, Source: MessageFunction.py

Example 2: colorize

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def colorize(x):
    ''' Converts a one-channel grayscale image to a color heatmap image '''
    if x.dim() == 2:
        x = torch.unsqueeze(x, 0)  # unsqueeze does not take an out= argument; rebind the result
    if x.dim() == 3:
        cl = torch.zeros([3, x.size(1), x.size(2)])
        cl[0] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
        cl[1] = gauss(x,1,.5,.3)
        cl[2] = gauss(x,1,.2,.3)
        cl[cl.gt(1)] = 1
    elif x.dim() == 4:
        cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
        cl[:,0,:,:] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
        cl[:,1,:,:] = gauss(x,1,.5,.3)
        cl[:,2,:,:] = gauss(x,1,.2,.3)
    return cl 
Developer ID: zhunzhong07, Project: Random-Erasing, Lines: 18, Source: visualize.py

Example 3: getclassAccuracy

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def getclassAccuracy(output, target, nclasses, topk=(1,)):
    """
    Computes the top-k accuracy between output and target and aggregates it by class
    :param output: output vector from the network
    :param target: ground-truth
    :param nclasses: nclasses in the problem
    :param topk: Top-k results desired, i.e. top1, top2, top5
    :return: topk vectors aggregated by class
    """
    maxk = max(topk)

    score, label_index = output.topk(k=maxk, dim=1, largest=True, sorted=True)
    correct = label_index.eq(torch.unsqueeze(target, 1))

    ClassAccuracyRes = []
    for k in topk:
        ClassAccuracy = torch.zeros([1, nclasses], dtype=torch.uint8).cuda()
        correct_k = correct[:, :k].sum(1)
        for n in range(target.shape[0]):
            ClassAccuracy[0, target[n]] += correct_k[n].byte()
        ClassAccuracyRes.append(ClassAccuracy)

    return ClassAccuracyRes 
Developer ID: vpulab, Project: Semantic-Aware-Scene-Recognition, Lines: 25, Source: utils.py
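
A note on the unsqueeze call above: torch.unsqueeze(target, 1) turns the (batch,) label vector into a (batch, 1) column so that eq broadcasts it against the (batch, maxk) index matrix. A minimal sketch with made-up labels:

import torch

label_index = torch.tensor([[2, 0, 1], [1, 2, 0]])    # top-3 predicted classes for 2 samples
target = torch.tensor([2, 0])                         # ground-truth labels

correct = label_index.eq(torch.unsqueeze(target, 1))  # (2, 1) column broadcasts against (2, 3)
print(correct)
# tensor([[ True, False, False],
#         [False, False,  True]])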

Example 4: query

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def query(self, images):
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images.data:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size-1)
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        return_images = Variable(torch.cat(return_images, 0))
        return return_images

# Initialize fake image pools 
Developer ID: AlexiaJM, Project: Deep-learning-with-cats, Lines: 25, Source: CycleGAN.py

Example 5: init_parameters

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def init_parameters(self):
        # Init weights
        if self.init_method == 'x': # Xavier            
            torch.nn.init.xavier_uniform_(self.weight)
        elif self.init_method == 'k': # Kaiming
            torch.nn.init.kaiming_uniform_(self.weight)
        elif self.init_method == 'p': # Poisson
            mu=self.kernel_size[0]/2 
            dist = poisson(mu)
            x = np.arange(0, self.kernel_size[0])
            y = np.expand_dims(dist.pmf(x),1)
            w = signal.convolve2d(y, y.transpose(), 'full')
            w = torch.Tensor(w).type_as(self.weight)
            w = torch.unsqueeze(w,0)
            w = torch.unsqueeze(w,1)
            w = w.repeat(self.out_channels, 1, 1, 1)
            w = w.repeat(1, self.in_channels, 1, 1)
            self.weight.data = w + torch.rand(w.shape)
            
        # Init bias
        self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)
        
        
# Non-negativity enforcement class 
Developer ID: abdo-eldesokey, Project: nconv, Lines: 26, Source: nconv.py
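
The back-to-back unsqueeze calls above are the usual way to lift a 2-D kernel into the (out_channels, in_channels, kH, kW) layout that Conv2d weights expect; the same pattern appears in example 6 below. A minimal shape check, with the kernel size and channel counts made up:

import torch

w = torch.rand(5, 5)          # a 2-D kernel
w = torch.unsqueeze(w, 0)     # (1, 5, 5)
w = torch.unsqueeze(w, 1)     # (1, 1, 5, 5) -- (out_ch, in_ch, kH, kW) layout
w = w.repeat(8, 1, 1, 1)      # tile over output channels
w = w.repeat(1, 3, 1, 1)      # tile over input channels
print(w.shape)                # torch.Size([8, 3, 5, 5])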

Example 6: navg_layer

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1, out_channels=1, initializer='x', pos=False, groups=1):
        
        navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, 
                         padding=(kernel_size[0]//2, kernel_size[1]//2), bias=False, groups=groups)
        
        weights = navg.weight            
        
        if initializer == 'x': # Xavier
            torch.nn.init.xavier_uniform_(weights)
        elif initializer == 'k': # Kaiming
            torch.nn.init.kaiming_uniform_(weights)
        elif initializer == 'p': # Poisson
            mu=kernel_size[0]/2 
            dist = poisson(mu)
            x = np.arange(0, kernel_size[0])
            y = np.expand_dims(dist.pmf(x),1)
            w = signal.convolve2d(y, y.transpose(), 'full')
            w = torch.FloatTensor(w).cuda()
            w = torch.unsqueeze(w,0)
            w = torch.unsqueeze(w,1)
            w = w.repeat(out_channels, 1, 1, 1)
            w = w.repeat(1, in_channels, 1, 1)
            weights.data = w + torch.rand(w.shape).cuda()
         
        return navg 
Developer ID: abdo-eldesokey, Project: nconv, Lines: 27, Source: unguided_network.py

Example 7: convolutional_layer

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def convolutional_layer(self, inputs):
        convolution_all = []
        conv_wts = []
        for i in range(self.seq_len):
            convolution_one_month = []
            for j in range(self.pad_size):
                convolution = self.conv(torch.unsqueeze(inputs[:, i, j], dim=1))
                convolution_one_month.append(convolution)
            convolution_one_month = torch.stack(convolution_one_month)
            convolution_one_month = torch.squeeze(convolution_one_month, dim=3)
            convolution_one_month = torch.transpose(convolution_one_month, 0, 1)
            convolution_one_month = torch.transpose(convolution_one_month, 1, 2)
            convolution_one_month = torch.squeeze(convolution_one_month, dim=1)
            convolution_one_month = self.func_tanh(convolution_one_month)
            convolution_one_month = torch.unsqueeze(convolution_one_month, dim=1)
            vec = torch.bmm(convolution_one_month, inputs[:, i])
            convolution_all.append(vec)
            conv_wts.append(convolution_one_month)
        convolution_all = torch.stack(convolution_all, dim=1)
        convolution_all = torch.squeeze(convolution_all, dim=2)
        conv_wts = torch.squeeze(torch.stack(conv_wts, dim=1), dim=2)
        return convolution_all, conv_wts 
Developer ID: BarnesLab, Project: Patient2Vec, Lines: 24, Source: Patient2Vec.py

Example 8: forward

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def forward(self, data):
        # Implement Equation 4.2 of the paper i.e. concat all layers' graph representations and apply linear model
        # note: this can be decomposed in one smaller linear model per layer
        x, edge_index, batch = data.x, data.edge_index, data.batch

        hidden_repres = []

        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            hidden_repres.append(x)

        # apply sortpool
        x_to_sortpool = torch.cat(hidden_repres, dim=1)
        x_1d = global_sort_pool(x_to_sortpool, batch, self.k)  # in the code the authors sort the last channel only

        # apply 1D convolutional layers
        x_1d = torch.unsqueeze(x_1d, dim=1)
        conv1d_res = F.relu(self.conv1d_params1(x_1d))
        conv1d_res = self.maxpool1d(conv1d_res)
        conv1d_res = F.relu(self.conv1d_params2(conv1d_res))
        conv1d_res = conv1d_res.reshape(conv1d_res.shape[0], -1)

        # apply dense layer
        out_dense = self.dense_layer(conv1d_res)
        return out_dense 
Developer ID: diningphil, Project: gnn-comparison, Lines: 27, Source: DGCNN.py

Example 9: calc_iou

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def calc_iou(a, b):
    area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])

    iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])
    ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])

    iw = torch.clamp(iw, min=0)
    ih = torch.clamp(ih, min=0)

    ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih

    ua = torch.clamp(ua, min=1e-8)

    intersection = iw * ih

    IoU = intersection / ua

    return IoU 
Developer ID: tristandb, Project: EfficientDet-PyTorch, Lines: 20, Source: losses.py
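
The unsqueeze calls above are what turn an element-wise comparison into a pairwise one: inserting a size-one dimension lets broadcasting pair every box in a with every box in b. A minimal sketch of the same trick, with the box tensors made up for illustration:

import torch

a = torch.tensor([[0., 0., 2., 2.]])                      # 1 box as (x1, y1, x2, y2)
b = torch.tensor([[1., 1., 3., 3.],
                  [4., 4., 5., 5.]])                      # 2 boxes

# a (1, 1) column against a (2,) row broadcasts to a (1, 2) pairwise grid
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - \
     torch.max(torch.unsqueeze(a[:, 0], dim=1), b[:, 0])
print(iw.clamp(min=0))                                    # tensor([[1., 0.]]) -- overlap width per (a, b) pair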

Example 10: batch2tensor

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def batch2tensor(batch_adj, batch_feat, node_per_pool_graph):
    """
    transform a batched graph to batched adjacency tensor and node feature tensor
    """
    batch_size = int(batch_adj.size()[0] / node_per_pool_graph)
    adj_list = []
    feat_list = []
    for i in range(batch_size):
        start = i * node_per_pool_graph
        end = (i + 1) * node_per_pool_graph
        adj_list.append(batch_adj[start:end, start:end])
        feat_list.append(batch_feat[start:end, :])
    adj_list = list(map(lambda x: th.unsqueeze(x, 0), adj_list))
    feat_list = list(map(lambda x: th.unsqueeze(x, 0), feat_list))
    adj = th.cat(adj_list, dim=0)
    feat = th.cat(feat_list, dim=0)

    return feat, adj 
Developer ID: dmlc, Project: dgl, Lines: 20, Source: model_utils.py

Example 11: masked_softmax

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def masked_softmax(matrix, mask, dim=-1, memory_efficient=True,
                   mask_fill_value=-1e32):
    '''
    masked_softmax for dgl batch graph
    code snippet contributed by AllenNLP (https://github.com/allenai/allennlp)
    '''
    if mask is None:
        result = th.nn.functional.softmax(matrix, dim=dim)
    else:
        mask = mask.float()
        while mask.dim() < matrix.dim():
            mask = mask.unsqueeze(1)
        if not memory_efficient:
            result = th.nn.functional.softmax(matrix * mask, dim=dim)
            result = result * mask
            result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
        else:
            masked_matrix = matrix.masked_fill((1 - mask).bool(),
                                               mask_fill_value)
            result = th.nn.functional.softmax(masked_matrix, dim=dim)
    return result 
Developer ID: dmlc, Project: dgl, Lines: 23, Source: model_utils.py
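
The while loop above is a common unsqueeze idiom: keep inserting size-one dimensions until the mask's rank matches the matrix it gates, so that broadcasting lines the two up. A minimal sketch with made-up shapes:

import torch

scores = torch.randn(2, 3, 4)            # e.g. (batch, heads, positions)
mask = torch.tensor([[1., 1., 0., 0.],
                     [1., 0., 0., 0.]])  # (batch, positions)

while mask.dim() < scores.dim():
    mask = mask.unsqueeze(1)             # (2, 4) -> (2, 1, 4), broadcastable over heads
print((scores * mask).shape)             # torch.Size([2, 3, 4])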

Example 12: query

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def query(self, images):
        if self.pool_size == 0:
            return Variable(images)
        return_images = []
        for image in images:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size-1)
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        return_images = Variable(torch.cat(return_images, 0))
        return return_images 
Developer ID: aayushbansal, Project: Recycle-GAN, Lines: 23, Source: image_pool.py

Example 13: forward

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def forward(self, h, r, t):
        h_emb, r_emb, t_emb = self.embed(h, r, t)
        first_dimen = list(h_emb.shape)[0]
        
        stacked_h = torch.unsqueeze(h_emb, dim=1)
        stacked_r = torch.unsqueeze(r_emb, dim=1)
        stacked_t = torch.unsqueeze(t_emb, dim=1)

        stacked_hrt = torch.cat([stacked_h, stacked_r, stacked_t], dim=1)
        stacked_hrt = torch.unsqueeze(stacked_hrt, dim=1)  # [b, 1, 3, k]

        stacked_hrt = [conv_layer(stacked_hrt) for conv_layer in self.conv_list]
        stacked_hrt = torch.cat(stacked_hrt, dim=3)
        stacked_hrt = stacked_hrt.view(first_dimen, -1)
        preds = self.fc1(stacked_hrt)
        preds = torch.squeeze(preds, dim=-1)
        return preds 
Developer ID: Sujit-O, Project: pykg2vec, Lines: 19, Source: pointwise.py
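
Unsqueezing each embedding to (batch, 1, k) and concatenating along dim=1, as done above, is equivalent to torch.stack(..., dim=1). A minimal sketch with made-up embeddings:

import torch

h, r, t = (torch.randn(4, 16) for _ in range(3))           # hypothetical (batch, k) embeddings

stacked = torch.cat([x.unsqueeze(1) for x in (h, r, t)], dim=1)
print(stacked.shape)                                        # torch.Size([4, 3, 16])
print(torch.equal(stacked, torch.stack([h, r, t], dim=1)))  # True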

Example 14: train_layer

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def train_layer(self, h, t):
        """ Defines the forward pass training layers of the algorithm.

            Args:
               h (Tensor): Head entity ids of the triple.
               t (Tensor): Tail entity ids of the triple.
        """
        
        mr1h = torch.matmul(h, self.mr1.weight) # h => [m, self.ent_hidden_size], self.mr1 => [self.ent_hidden_size, self.rel_hidden_size]
        mr2t = torch.matmul(t, self.mr2.weight) # t => [m, self.ent_hidden_size], self.mr2 => [self.ent_hidden_size, self.rel_hidden_size]

        expanded_h = h.unsqueeze(dim=0).repeat(self.rel_hidden_size, 1, 1) # [self.rel_hidden_size, m, self.ent_hidden_size]
        expanded_t = t.unsqueeze(dim=-1) # [m, self.ent_hidden_size, 1]

        temp = (torch.matmul(expanded_h, self.mr.weight.view(self.rel_hidden_size, self.ent_hidden_size, self.ent_hidden_size))).permute(1, 0, 2) # [m, self.rel_hidden_size, self.ent_hidden_size]
        htmrt = torch.squeeze(torch.matmul(temp, expanded_t), dim=-1) # [m, self.rel_hidden_size]

        return torch.tanh(htmrt + mr1h + mr2t + self.br.weight)
Developer ID: Sujit-O, Project: pykg2vec, Lines: 20, Source: pairwise.py

Example 15: pool2d

# Required module: import torch [as alias]
# Or: from torch import unsqueeze [as alias]
def pool2d(tensor, kernel_size: int = 2, stride: int = 2, mode="max"):
    assert len(tensor.shape) < 5
    if len(tensor.shape) == 2:
        return _pool(tensor, kernel_size, stride, mode)
    if len(tensor.shape) == 3:
        return torch.squeeze(pool2d(torch.unsqueeze(tensor, dim=0), kernel_size, stride, mode))  # lift to 4-D, pool, then squeeze away all size-one dims
    batches = tensor.shape[0]
    channels = tensor.shape[1]
    out_shape = (
        batches,
        channels,
        (tensor.shape[2] - kernel_size) // stride + 1,
        (tensor.shape[3] - kernel_size) // stride + 1,
    )
    result = []
    for batch in range(batches):
        for channel in range(channels):
            result.append(_pool(tensor[batch][channel], kernel_size, stride, mode))
    result = torch.stack(result).reshape(out_shape)
    return result 
Developer ID: OpenMined, Project: PySyft, Lines: 22, Source: functional.py


Note: The torch.unsqueeze method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and copyright in the source code remains with the original authors. Follow each project's license when redistributing or using the code; do not repost without permission.