

Python torch.mm Method Code Examples

This article collects typical usage examples of the torch.mm method in Python. If you are wondering how exactly to use torch.mm, how to call it, or what working examples look like, the curated method examples below should help. You can also browse further usage examples from the torch module that this method belongs to.


Below are 15 code examples of the torch.mm method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
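As a quick orientation before the examples: torch.mm computes the matrix product of two 2-D tensors, so an (n × m) matrix times an (m × p) matrix yields an (n × p) result; unlike torch.matmul, it neither broadcasts nor accepts batched inputs. A minimal sketch:

import torch

a = torch.randn(2, 3)
b = torch.randn(3, 4)
c = torch.mm(a, b)  # (2, 3) x (3, 4) -> (2, 4)
print(c.shape)      # torch.Size([2, 4])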

Example 1: test_grad

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
import torch
import torch.nn as nn
import torch.nn.functional as f
from torch import tensor

def test_grad():
    input = tensor(([1, 2, 3], [4, 5, 6], [7, 8, 9]), dtype=torch.float)
    # weight = tensor(([0.1, 0.2, 0.3, 0.4], [0.1, 0.2, 0.3, 0.4], [0.1, 0.2, 0.3, 0.4]), requires_grad=True)
    weight = torch.rand(3, 4, requires_grad=True)  # wrapping a tensor in tensor() again is deprecated
    # input = input.unsqueeze(0)
    print(input, weight)
    pre = torch.mm(input, weight)  # (3, 3) x (3, 4) -> (3, 4)
    loss2 = nn.MultiLabelMarginLoss()
    label1 = tensor(([0, 1, 1, 0],), dtype=torch.float)  # broadcasts against pre's 3 rows
    label2 = tensor(([0, 1, 1, 0], [1, 0, 0, 0], [1, 0, 1, 1]), dtype=torch.long)
    print(pre, label1)
    loss1 = f.multilabel_soft_margin_loss(pre, label1, reduction='sum')
    loss1.backward()
    print('weight.grad.data1:', weight.grad.data)

    # loss2 = loss2(pre, label2)
    # loss2.backward()
    # print('weight.grad.data2:', weight.grad.data) 
Author: MagicChuyi, Project: SlowFast-Network-pytorch, Lines: 21, Source: test_con.py

Example 2: fuse_conv_and_bn

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def fuse_conv_and_bn(conv, bn):
    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    with torch.no_grad():
        # init
        fusedconv = torch.nn.Conv2d(conv.in_channels,
                                    conv.out_channels,
                                    kernel_size=conv.kernel_size,
                                    stride=conv.stride,
                                    padding=conv.padding,
                                    bias=True)

        # prepare filters
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

        # prepare spatial bias
        if conv.bias is not None:
            b_conv = conv.bias
        else:
            b_conv = torch.zeros(conv.weight.size(0))
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fusedconv.bias.copy_(b_conv + b_bn)

        return fusedconv 
Author: zbyuan, Project: pruning_yolov3, Lines: 27, Source: torch_utils.py
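A quick way to sanity-check this fusion is to compare the fused layer against the original conv followed by the batch norm in eval mode (a sketch; the layer sizes below are arbitrary assumptions):

conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = torch.nn.BatchNorm2d(8).eval()  # eval mode so the running statistics are used
fused = fuse_conv_and_bn(conv, bn)

x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # expected: True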

Example 3: cmmd

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def cmmd(source, target, s_label, t_label, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    # One-hot encode the labels; the hardcoded sizes assume a batch of 32 samples and 31 classes.
    s_label = s_label.cpu()
    s_label = s_label.view(32, 1)
    s_label = torch.zeros(32, 31).scatter_(1, s_label.data, 1)
    s_label = Variable(s_label).cuda()  # Variable is the legacy (pre-0.4) wrapper, kept as in the source

    t_label = t_label.cpu()
    t_label = t_label.view(32, 1)
    t_label = torch.zeros(32, 31).scatter_(1, t_label.data, 1)
    t_label = Variable(t_label).cuda()

    batch_size = int(source.size()[0])
    kernels = guassian_kernel(source, target,
                              kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
    loss = 0
    XX = kernels[:batch_size, :batch_size]
    YY = kernels[batch_size:, batch_size:]
    XY = kernels[:batch_size, batch_size:]
    loss += torch.mean(torch.mm(s_label, torch.transpose(s_label, 0, 1)) * XX +
                      torch.mm(t_label, torch.transpose(t_label, 0, 1)) * YY -
                      2 * torch.mm(s_label, torch.transpose(t_label, 0, 1)) * XY)
    return loss 
Author: jindongwang, Project: transferlearning, Lines: 24, Source: mmd.py
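The guassian_kernel helper called above is not shown on this page. A sketch of the usual implementation in this repository's style (bandwidths spread geometrically around a data-driven base; treat the details as an assumption rather than the verbatim source):

def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    n_samples = int(source.size(0)) + int(target.size(0))
    total = torch.cat([source, target], dim=0)
    # pairwise squared Euclidean distances between all rows of `total`
    total0 = total.unsqueeze(0).expand(total.size(0), total.size(0), total.size(1))
    total1 = total.unsqueeze(1).expand(total.size(0), total.size(0), total.size(1))
    l2_distance = ((total0 - total1) ** 2).sum(2)
    # data-driven base bandwidth unless one is fixed
    bandwidth = fix_sigma if fix_sigma else torch.sum(l2_distance.data) / (n_samples ** 2 - n_samples)
    bandwidth /= kernel_mul ** (kernel_num // 2)
    bandwidth_list = [bandwidth * (kernel_mul ** i) for i in range(kernel_num)]
    kernel_val = [torch.exp(-l2_distance / bw) for bw in bandwidth_list]
    return sum(kernel_val)  # (2*batch, 2*batch) kernel matrix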

Example 4: _mix_rbf_kernel

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def _mix_rbf_kernel(X, Y, sigma_list):
    assert X.size(0) == Y.size(0)
    m = X.size(0)

    Z = torch.cat((X, Y), 0)
    ZZT = torch.mm(Z, Z.t())
    diag_ZZT = torch.diag(ZZT).unsqueeze(1)
    Z_norm_sqr = diag_ZZT.expand_as(ZZT)
    exponent = Z_norm_sqr - 2 * ZZT + Z_norm_sqr.t()

    K = 0.0
    for sigma in sigma_list:
        gamma = 1.0 / (2 * sigma**2)
        K += torch.exp(-gamma * exponent)

    return K[:m, :m], K[:m, m:], K[m:, m:], len(sigma_list) 
Author: jindongwang, Project: transferlearning, Lines: 18, Source: mmd.py
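The three kernel blocks returned here are typically combined into a biased squared-MMD estimate; a minimal sketch of that downstream step (the function name mix_rbf_mmd2 is illustrative):

def mix_rbf_mmd2(X, Y, sigma_list=(1.0, 2.0, 4.0)):
    K_XX, K_XY, K_YY, _ = _mix_rbf_kernel(X, Y, sigma_list)
    # biased estimator: average over all pairs, diagonal included
    return K_XX.mean() + K_YY.mean() - 2 * K_XY.mean()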

Example 5: __call__

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be whitened.

        Returns:
            Tensor: Transformed image.
        """
        if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
            raise ValueError(
                "tensor and transformation matrix have incompatible shape. "
                "[{} x {} x {}] != {}".format(*tensor.size(),
                                              self.transformation_matrix.size(0)))
        flat_tensor = tensor.view(1, -1) - self.mean_vector
        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
        tensor = transformed_tensor.view(tensor.size())
        return tensor 
Author: PistonY, Project: torch-toolbox, Lines: 23, Source: transforms.py
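This transform expects mean_vector and transformation_matrix to be precomputed, typically via ZCA whitening of the flattened dataset. A sketch under that assumption (names are illustrative, not from the source):

def compute_zca_matrix(dataset_flat, eps=1e-5):
    # dataset_flat: (num_samples, C*H*W)
    mean_vector = dataset_flat.mean(dim=0, keepdim=True)
    centered = dataset_flat - mean_vector
    cov = torch.mm(centered.t(), centered) / centered.size(0)
    u, s, _ = torch.svd(cov)
    # W_zca = U diag(1/sqrt(s + eps)) U^T
    transformation_matrix = torch.mm(torch.mm(u, torch.diag(1.0 / torch.sqrt(s + eps))), u.t())
    return mean_vector, transformation_matrix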

Example 6: forward

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def forward(self, query_embed, in_memory_embed, atten_mask=None):
        if self.atten_type == 'simple': # simple attention
            attention = torch.bmm(in_memory_embed, query_embed.unsqueeze(2)).squeeze(2)
        elif self.atten_type == 'mul': # multiplicative attention
            attention = torch.bmm(in_memory_embed, torch.mm(query_embed, self.W).unsqueeze(2)).squeeze(2)
        elif self.atten_type == 'add': # additive attention
            attention = torch.tanh(torch.mm(in_memory_embed.view(-1, in_memory_embed.size(-1)), self.W2)\
                .view(in_memory_embed.size(0), -1, self.W2.size(-1)) \
                + torch.mm(query_embed, self.W).unsqueeze(1))
            attention = torch.mm(attention.view(-1, attention.size(-1)), self.W3).view(attention.size(0), -1)
        else:
            raise RuntimeError('Unknown atten_type: {}'.format(self.atten_type))

        if atten_mask is not None:
            # Exclude masked elements from the softmax
            attention = atten_mask * attention - (1 - atten_mask) * INF
        return attention 
Author: hugochan, Project: BAMnet, Lines: 19, Source: modules.py
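Downstream, the returned scores are usually normalized into weights and used to pool the memory. A hedged usage sketch (the softmax-and-pool step is an assumption, not shown in the source):

probs = torch.softmax(attention, dim=-1)                  # (batch, mem_size)
context = torch.bmm(probs.unsqueeze(1), in_memory_embed)  # (batch, 1, embed_dim)
context = context.squeeze(1)                              # (batch, embed_dim)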

Example 7: forward

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def forward(self, input_set):
    """
      Args:
        input_set: shape N X D

      Returns:
        hidden: shape 1 X 2D (the returned value is the final hidden state)
    """
    num_element = input_set.shape[0]
    element_dim = input_set.shape[1]
    assert element_dim == self.element_dim
    hidden = torch.zeros(1, 2 * self.element_dim).to(input_set.device)
    memory = torch.zeros(1, self.element_dim).to(input_set.device)

    for tt in range(self.num_step_encoder):
      hidden, memory = self.LSTM(hidden, memory)
      energy = torch.tanh(torch.mm(hidden, self.W_1) + input_set).mm(self.W_2)
      att_weight = F.softmax(energy, dim=0)
      read = (input_set * att_weight).sum(dim=0, keepdim=True)
      hidden = torch.cat([hidden, read], dim=1)

    return hidden 
Author: lrjconan, Project: LanczosNetwork, Lines: 24, Source: set2set.py

Example 8: pairwise_distance

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def pairwise_distance(features, query=None, gallery=None, metric=None):
    if query is None and gallery is None:
        n = len(features)
        x = torch.cat(list(features.values()))
        x = x.view(n, -1)
        if metric is not None:
            x = metric.transform(x)
        dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(n, n)
        # ||xi||^2 + ||xj||^2 - 2 * xi . xj
        dist = dist + dist.t() - 2 * torch.mm(x, x.t())
        return dist

    x = torch.cat([features["".join(f)].unsqueeze(0) for f, _, _, _ in query], 0)
    y = torch.cat([features["".join(f)].unsqueeze(0) for f, _, _, _ in gallery], 0)
    m, n = x.size(0), y.size(0)
    x = x.view(m, -1)
    y = y.view(n, -1)
    if metric is not None:
        x = metric.transform(x)
        y = metric.transform(y)
    dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
           torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # dist -= 2 * x @ y.t(); keyword form replaces the deprecated positional signature
    return dist 
Author: gddingcs, Project: Dispersion-based-Clustering, Lines: 25, Source: evaluators.py
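The expand/addmm_ pattern above is just the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x . y; a quick check against torch.cdist (a sketch with arbitrary sizes):

x = torch.randn(5, 8)
y = torch.randn(7, 8)
dist_sq = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(5, 7) + \
          torch.pow(y, 2).sum(dim=1, keepdim=True).expand(7, 5).t()
dist_sq = dist_sq - 2 * torch.mm(x, y.t())
print(torch.allclose(dist_sq, torch.cdist(x, y) ** 2, atol=1e-4))  # expected: True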

Example 9: calculate_regression_loss

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def calculate_regression_loss(self, z, target):
        """
        Calculating the regression loss for all pairs of nodes.
        :param z: Hidden vertex representations.
        :param target: Target vector.
        :return loss_term: Regression loss.
        :return predictions_soft: Predictions for each vertex pair.
        """
        pos = torch.cat((self.positive_z_i, self.positive_z_j), 1)
        neg = torch.cat((self.negative_z_i, self.negative_z_j), 1)

        surr_neg_i = torch.cat((self.negative_z_i, self.negative_z_k), 1)
        surr_neg_j = torch.cat((self.negative_z_j, self.negative_z_k), 1)
        surr_pos_i = torch.cat((self.positive_z_i, self.positive_z_k), 1)
        surr_pos_j = torch.cat((self.positive_z_j, self.positive_z_k), 1)

        features = torch.cat((pos, neg, surr_neg_i, surr_neg_j, surr_pos_i, surr_pos_j))
        predictions = torch.mm(features, self.regression_weights)
        predictions_soft = F.log_softmax(predictions, dim=1)
        loss_term = F.nll_loss(predictions_soft, target)
        return loss_term, predictions_soft 
Author: benedekrozemberczki, Project: SGCN, Lines: 23, Source: sgcn.py

Example 10: score_model

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def score_model(self, epoch):
        """
        Score the model on the test set edges in each epoch.
        :param epoch: Epoch number.
        """
        loss, self.train_z = self.model(self.positive_edges, self.negative_edges, self.y)
        score_positive_edges = torch.from_numpy(np.array(self.test_positive_edges, dtype=np.int64).T).type(torch.long).to(self.device)
        score_negative_edges = torch.from_numpy(np.array(self.test_negative_edges, dtype=np.int64).T).type(torch.long).to(self.device)
        test_positive_z = torch.cat((self.train_z[score_positive_edges[0, :], :], self.train_z[score_positive_edges[1, :], :]), 1)
        test_negative_z = torch.cat((self.train_z[score_negative_edges[0, :], :], self.train_z[score_negative_edges[1, :], :]), 1)
        scores = torch.mm(torch.cat((test_positive_z, test_negative_z), 0), self.model.regression_weights.to(self.device))
        probability_scores = torch.exp(F.softmax(scores, dim=1))
        predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
        predictions = predictions.cpu().detach().numpy()
        targets = [0]*len(self.test_positive_edges) + [1]*len(self.test_negative_edges)
        auc, f1 = calculate_auc(targets, predictions, self.edges)
        self.logs["performance"].append([epoch+1, auc, f1]) 
Author: benedekrozemberczki, Project: SGCN, Lines: 19, Source: sgcn.py

Example 11: __init__

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def __init__(self, input1_size, input2_size, output_size):
        """
        The version actually in use.
        :param input1_size:
        :param input2_size:
        :param output_size: size of the biaffine classification space
        """
        super().__init__()
        # Why the +1:
        # the biaffine transform in matrix form:
        # [(batch_size*seq_len), (head_feat_size+1)] * [(head_feat_size+1), ((dep_feat_size+1))*output_size]
        #       mm-> [(batch_size*seq_len), ((dep_feat_size+1))*output_size]
        # [(batch_size*seq_len), ((dep_feat_size+1))*output_size]
        #       view-> [batch_size, (seq_len*output_size), (dep_feat_size+1)]
        # [batch_size, (seq_len*output_size), (dep_feat_size+1)] * [batch_size, (dep_feat_size+1), seq_len]
        #       bmm-> [batch_size, (seq_len*output_size), seq_len]
        # [batch_size, (seq_len*output_size), seq_len]
        #       view-> [batch_size, seq_len, seq_len, output_size]
        self.W_bilin = PairwiseBilinear(input1_size + 1, input2_size + 1, output_size)

        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_() 
Author: NLPInBLCU, Project: BiaffineDependencyParsing, Lines: 24, Source: biaffine.py
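The shape chain in those comments corresponds to a PairwiseBilinear forward pass along these lines (a sketch; the weight is stored as (d1, output_size, d2) here so the reshapes line up, and the exact storage layout varies by implementation):

def pairwise_bilinear_forward(input1, input2, weight):
    # input1: (batch, seq_len, d1), input2: (batch, seq_len, d2)
    # weight: (d1, output_size, d2)
    batch_size, seq_len, d1 = input1.size()
    _, output_size, d2 = weight.size()
    # ((batch*seq_len), d1) x (d1, output_size*d2) -> ((batch*seq_len), output_size*d2)
    intermediate = torch.mm(input1.reshape(-1, d1), weight.view(d1, output_size * d2))
    # -> (batch, seq_len*output_size, d2), then bmm with (batch, d2, seq_len)
    scores = torch.bmm(intermediate.view(batch_size, seq_len * output_size, d2),
                       input2.transpose(1, 2))
    # (batch, seq_len*output_size, seq_len) -> (batch, seq_len, seq_len, output_size)
    return scores.view(batch_size, seq_len, output_size, seq_len).transpose(2, 3)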

Example 12: forward

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]

        f_1 = torch.matmul(h, self.a1)
        f_2 = torch.matmul(h, self.a2)
        e = self.leakyrelu(f_1 + f_2.transpose(0,1))

        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Author: meliketoy, Project: graph-cnn.pytorch, Lines: 20, Source: layers.py
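This forward pass assumes the layer defines W, a1, a2, dropout, alpha, and concat in its constructor. A hedged sketch of that setup, following the common pyGAT-style initialization (an assumption, not necessarily this repository's exact code):

import torch
import torch.nn as nn

class GraphAttentionLayer(nn.Module):
    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        super().__init__()
        self.dropout = dropout
        self.concat = concat
        self.W = nn.Parameter(torch.empty(in_features, out_features))
        self.a1 = nn.Parameter(torch.empty(out_features, 1))
        self.a2 = nn.Parameter(torch.empty(out_features, 1))
        nn.init.xavier_uniform_(self.W)
        nn.init.xavier_uniform_(self.a1)
        nn.init.xavier_uniform_(self.a2)
        self.leakyrelu = nn.LeakyReLU(alpha)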

Example 13: _power_iteration

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def _power_iteration(self, A, num_simulations=30):
        # Ideally choose a random vector to decrease the chance
        # that our vector is orthogonal to the dominant eigenvector
        b_k = torch.rand(A.shape[1]).unsqueeze(dim=1) * 0.5 - 1

        for _ in range(num_simulations):
            # calculate the matrix-by-vector product Ab
            b_k1 = torch.mm(A, b_k)

            # calculate the norm
            b_k1_norm = torch.norm(b_k1)

            # re normalize the vector
            b_k = b_k1 / b_k1_norm

        return b_k 
Author: diningphil, Project: gnn-comparison, Lines: 19, Source: manager.py
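To confirm the iteration converges to the dominant eigenvector, recover the eigenvalue with a Rayleigh quotient and compare against torch.linalg.eigh on a symmetric test matrix (a sketch that treats _power_iteration as a standalone function for illustration):

A = torch.randn(6, 6)
A = A @ A.t()                                      # symmetric PSD test matrix
b_k = _power_iteration(A, num_simulations=100)
rayleigh = (b_k.t() @ A @ b_k) / (b_k.t() @ b_k)   # estimated dominant eigenvalue
print(rayleigh.item(), torch.linalg.eigh(A).eigenvalues[-1].item())  # should agree closely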

Example 14: _vertex_decimation

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def _vertex_decimation(self, L):

        max_eigenvec = self._power_iteration(L)
        v_plus, v_minus = (max_eigenvec >= 0).squeeze(), (max_eigenvec < 0).squeeze()

        # print(v_plus, v_minus)

        # If the matrix is diagonal, swap v_minus with v_plus so as not to incur errors (this does not change the matrix)
        if torch.sum(v_plus) == 0.:  # The matrix is diagonal, cannot reduce further
            if torch.sum(v_minus) == 0.:
                assert v_minus.shape[0] == L.shape[0], (v_minus.shape, L.shape)
                # I assumed v_minus should have ones, but this is not necessarily the case. So I added this if
                return torch.ones(v_minus.shape), L
            else:
                return v_minus, L

        L_plus_plus = L[v_plus][:, v_plus]
        L_plus_minus = L[v_plus][:, v_minus]
        L_minus_minus = L[v_minus][:, v_minus]
        L_minus_plus = L[v_minus][:, v_plus]

        # Schur complement (Kron reduction): L_new = L_pp - L_pm * L_mm^{-1} * L_mp
        L_new = L_plus_plus - torch.mm(torch.mm(L_plus_minus, torch.inverse(L_minus_minus)), L_minus_plus)

        return v_plus, L_new 
Author: diningphil, Project: gnn-comparison, Lines: 26, Source: manager.py

Example 15: regularizer_orth2

# Required import: import torch [as alias]
# Or: from torch import mm [as alias]
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # Applies regularization to the training by performing the
    # orthogonalization technique described in the paper
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies svd_orthogonalization() to every layer of the model.
    # usage: net.apply(regularizer_orth2)
    # ----------------------------------------
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        w = w.permute(2, 3, 1, 0).contiguous().view(f1*f2*c_in, c_out)
        u, s, v = torch.svd(w)
        s_mean = s.mean()
        s[s > 1.5*s_mean] = s[s > 1.5*s_mean] - 1e-4
        s[s < 0.5*s_mean] = s[s < 0.5*s_mean] + 1e-4
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1)  # .type(dtype)
    else:
        pass 
Author: cszn, Project: KAIR, Lines: 26, Source: utils_regularizers.py


Note: The torch.mm method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to each project's License. Do not repost without permission.