

Python torch.mv Method Code Examples

This article collects typical usage examples of the torch.mv method in Python. If you are wondering what exactly torch.mv does, how to use it, or what torch.mv code looks like in practice, the curated examples here may help. You can also explore further usage examples of the containing module, torch.


Below, 15 code examples of the torch.mv method are shown, sorted by popularity by default.
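Before the individual examples, here is a minimal sketch of the method itself, assuming only the public torch API: torch.mv(mat, vec) computes the matrix-vector product of a 2-D tensor mat of shape (n, m) and a 1-D tensor vec of shape (m,), returning a 1-D tensor of shape (n,).

import torch

mat = torch.randn(3, 4)
vec = torch.randn(4)

out = torch.mv(mat, vec)  # shape: torch.Size([3])

# For a 2-D by 1-D operand pair, torch.mv is equivalent to the @ operator.
assert torch.allclose(out, mat @ vec)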

Example 1: compute_one_iter

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def compute_one_iter(self):
        if not self.initialized:
            raise ValueError('Layer needs to be initialized first.')
        domain, codomain = self.compute_domain_codomain()
        if self.kernel_size == (1, 1):
            u = self.u.detach()
            v = self.v.detach()
            weight = self.weight.detach().view(self.out_channels, self.in_channels)
            u = normalize_u(torch.mv(weight, v), codomain)
            v = normalize_v(torch.mv(weight.t(), u), domain)
            return torch.dot(u, torch.mv(weight, v))
        else:
            u = self.u.detach()
            v = self.v.detach()
            weight = self.weight.detach()
            c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
            u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
            out_shape = u_s.shape
            u = normalize_u(u_s.view(-1), codomain)
            v_s = F.conv_transpose2d(
                u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0
            )
            v = normalize_v(v_s.view(-1), domain)
            weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
            return torch.dot(u.view(-1), weight_v.view(-1)) 
Author: rtqichen, Project: residual-flows, Lines: 27, Source: mixed_lipschitz.py

Example 2: unify_sentence

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def unify_sentence(self, sentence_feature, one_sentence_embedding):
        """
            Unify Sentence By Token Importance
        """
        sent_len = one_sentence_embedding.size()[0]

        var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
        for token_index in range(sent_len):
            token_feature = sentence_feature[:, token_index, :]
            sim_map = self.cosine_similarity_torch(token_feature)
            var_token[token_index] = torch.var(sim_map.diagonal(-1))

        var_token = var_token / torch.sum(var_token)
        sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)

        return sentence_embedding 
Author: UKPLab, Project: sentence-transformers, Lines: 18, Source: WKPooling.py

Example 3: C3

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def C3():
    n = 10
    A = torch.zeros(n, n)
    A.view(-1)[::n + 1] = -2
    A.view(-1)[n::n + 1] = 1
    A.view(-1)[1::n + 1] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(n)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None 
Author: rtqichen, Project: torchdiffeq, Lines: 18, Source: detest.py
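A note on the construction above (C4 below is identical except for n): the strided writes into A.view(-1) place -2 on the main diagonal and 1 on the first sub- and super-diagonals, so A is the tridiagonal second-difference (discrete 1-D Laplacian) matrix. A small sketch checking this against a direct torch.diag construction:

import torch

n = 10
A = torch.zeros(n, n)
A.view(-1)[::n + 1] = -2   # main diagonal
A.view(-1)[n::n + 1] = 1   # subdiagonal
A.view(-1)[1::n + 1] = 1   # superdiagonal

A_ref = (-2 * torch.eye(n)
         + torch.diag(torch.ones(n - 1), 1)
         + torch.diag(torch.ones(n - 1), -1))
assert torch.equal(A, A_ref)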

Example 4: C4

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def C4():
    n = 51
    A = torch.zeros(n, n)
    A.view(-1)[::n + 1] = -2
    A.view(-1)[n::n + 1] = 1
    A.view(-1)[1::n + 1] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(n)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None 
Author: rtqichen, Project: torchdiffeq, Lines: 18, Source: detest.py

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def forward(self, *args):
        r"""Computes the output of the ``module`` and appies spectral normalization to the
        ``name`` attribute of the ``module``.

        Returns:
            The output of the ``module``.
        """
        height = self.w_bar.data.shape[0]
        for _ in range(self.power_iterations):
            self.v.data = self._l2normalize(
                torch.mv(torch.t(self.w_bar.view(height, -1)), self.u)
            )
            self.u.data = self._l2normalize(
                torch.mv(self.w_bar.view(height, -1), self.v)
            )
        sigma = self.u.dot(self.w_bar.view(height, -1).mv(self.v))
        setattr(self.module, self.name, self.w_bar / sigma.expand_as(self.w_bar))
        return self.module.forward(*args) 
Author: torchgan, Project: torchgan, Lines: 20, Source: spectralnorm.py
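The loop above is the standard power iteration for estimating the spectral norm (largest singular value) of the flattened weight matrix. A standalone sketch of the same idea, with a local l2normalize helper (not the module's own) and a check against torch.linalg.svdvals, available in recent PyTorch:

import torch

def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

torch.manual_seed(0)
W = torch.randn(8, 5)
u = l2normalize(torch.randn(8))
for _ in range(50):
    v = l2normalize(torch.mv(W.t(), u))
    u = l2normalize(torch.mv(W, v))

sigma = torch.dot(u, torch.mv(W, v))
# sigma should closely match the largest singular value of W.
print(sigma.item(), torch.linalg.svdvals(W)[0].item())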

Example 6: forward

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def forward(self, z):
        # Create uhat such that it is parallel to w
        uw = torch.dot(self.u, self.w)
        muw = -1 + F.softplus(uw)
        uhat = self.u + (muw - uw) * torch.transpose(self.w, 0, -1) / torch.sum(self.w ** 2)

        # Equation 21 - Transform z
        zwb = torch.mv(z, self.w) + self.b

        f_z = z + (uhat.view(1, -1) * F.tanh(zwb).view(-1, 1))

        # Compute the Jacobian using the fact that
        # d/dx tanh(x) = 1 - tanh(x)**2
        psi = (1 - F.tanh(zwb)**2).view(-1, 1) * self.w.view(1, -1)
        psi_u = torch.mv(psi, uhat)

        # Return the transformed output along
        # with the log determinant of J
        logdet_jacobian = torch.log(torch.abs(1 + psi_u) + 1e-8)

        return f_z, logdet_jacobian 
Author: wohlert, Project: semi-supervised-pytorch, Lines: 23, Source: flow.py
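For reference, the transform above is the planar flow from Rezende & Mohamed's normalizing-flows paper (the uhat construction keeps the flow invertible). Written out, the code computes

f(z) = z + \hat{u} \tanh(w^\top z + b), \quad \psi(z) = \left(1 - \tanh^2(w^\top z + b)\right) w, \quad \log\left|\det \frac{\partial f}{\partial z}\right| = \log\left|1 + \hat{u}^\top \psi(z)\right|

which matches f_z, psi, and logdet_jacobian in the code, up to the 1e-8 stabilizer inside the log.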

Example 7: _oscar_prox_jacobian

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def _oscar_prox_jacobian(y_star, dout=None):
    y_star = y_star.numpy()
    dim = y_star.shape[0]
    J = torch.zeros(dim, dim)

    _, inv, counts = np.unique(np.abs(y_star),
                               return_inverse=True,
                               return_counts=True)

    for i in range(dim):
        for j in range(dim):
            if (inv[i] == inv[j] and
                    y_star[i] != 0):
                J[i, j] = (np.sign(y_star[i]) * np.sign(y_star[j])
                           / counts[inv[i]])
    if dout is not None:
        return torch.mv(J, dout)
    else:
        return J 
Author: vene, Project: sparse-structured-attention, Lines: 21, Source: test_oscar.py

Example 8: _fused_prox_jacobian

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def _fused_prox_jacobian(y_hat, dout=None):
    """reference naive implementation: construct the jacobian"""
    dim = y_hat.shape[0]
    groups = torch.zeros(dim)
    J = torch.zeros(dim, dim)
    current_group = 0

    for i in range(1, dim):
        if y_hat[i] == y_hat[i - 1]:
            groups[i] = groups[i - 1]
        else:
            current_group += 1
            groups[i] = current_group

    for i in range(dim):
        for j in range(dim):
            if groups[i] == groups[j]:
                n_fused = (groups == groups[i]).sum()
                J[i, j] = 1 / n_fused.to(y_hat.dtype)

    if dout is not None:
        return torch.mv(J, dout)
    else:
        return J 
Author: vene, Project: sparse-structured-attention, Lines: 26, Source: test_fused.py

Example 9: backward

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def backward(self, grad_output):
        input, weight, bias = self.saved_tensors

        grad_input = grad_weight = grad_bias = None
        if self.needs_input_grad[0]:
            grad_output = grad_output.squeeze()
            grad_input = torch.mm(grad_output, weight)
        if self.needs_input_grad[1]:
            grad_weight = torch.mm(grad_output.t(), input)
        if bias is not None and self.needs_input_grad[2]:
            grad_bias = torch.mv(grad_output.t(), self.add_buffer)

        if bias is not None:
            return grad_input, grad_weight, grad_bias
        else:
            return grad_input, grad_weight 
Author: JannerM, Project: spatial-reasoning, Lines: 18, Source: Linear_custom.py

Example 10: dot_nd

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def dot_nd(query, candidates):
    """
    Perform a dot product between a query and n-dimensional candidates.
    :param query: (Tensor) A vector to query, whose size is
                  (query_dim,)
    :param candidates: (Tensor) A n-dimensional tensor to be multiplied
                       by query, whose size is (d0, d1, ..., dn, query_dim)
    :returns: The result of the dot product, whose size is
              (d0, d1, ..., dn)
    """

    cands_size = candidates.size()
    cands_flat = candidates.view(-1, cands_size[-1])
    output_flat = torch.mv(cands_flat, query)
    output = output_flat.view(*cands_size[:-1])
    return output 
Author: BangLiu, Project: QANet-PyTorch, Lines: 18, Source: treelstm_utils.py
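A quick usage sketch for dot_nd, with hypothetical shapes: a (4,)-dimensional query against candidates of shape (2, 3, 4) yields a result of shape (2, 3), equal to an elementwise multiply-and-sum over the last axis:

import torch

query = torch.randn(4)
candidates = torch.randn(2, 3, 4)

result = dot_nd(query, candidates)  # shape: torch.Size([2, 3])
assert torch.allclose(result, (candidates * query).sum(dim=-1))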

Example 11: dot_nd

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def dot_nd(query, candidates):
    """
    Perform a dot product between a query and n-dimensional candidates.

    Args:
        query (Variable): A vector to query, whose size is
            (query_dim,)
        candidates (Variable): A n-dimensional tensor to be multiplied
            by query, whose size is (d0, d1, ..., dn, query_dim)

    Returns:
        output: The result of the dot product, whose size is
            (d0, d1, ..., dn)
    """

    cands_size = candidates.size()
    cands_flat = candidates.view(-1, cands_size[-1])
    output_flat = torch.mv(cands_flat, query)
    output = output_flat.view(*cands_size[:-1])
    return output 
Author: ExplorerFreda, Project: TreeEnc, Lines: 22, Source: basic.py

Example 12: cos_nd

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def cos_nd(query, candidates):
    """
        Perform cosine between a query and n-dimensional candidates.

        Args:
            query (Variable): A vector to query, whose size is
                (query_dim,)
            candidates (Variable): A n-dimensional tensor to be multiplied
                by query, whose size is (d0, d1, ..., dn, query_dim)

        Returns:
            output: The result of the cosine operator, whose size is
                (d0, d1, ..., dn)
        """

    cands_size = candidates.size()
    cands_flat = candidates.view(-1, cands_size[-1])
    output_flat = torch.mv(cands_flat, query)
    output = output_flat.view(*cands_size[:-1])
    lengths = (torch.sum(candidates ** 2, dim=-1) + 1e-10) ** 0.5
    lengths = lengths.contiguous().view(output.size())
    output = output / lengths / ((torch.sum(query ** 2) + 1e-10) ** 0.5)
    return output 
Author: ExplorerFreda, Project: TreeEnc, Lines: 25, Source: basic.py
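The same shapes apply to cos_nd, and its output should agree with F.cosine_similarity up to the small epsilon terms each adds for numerical stability. A usage sketch under that assumption:

import torch
import torch.nn.functional as F

query = torch.randn(4)
candidates = torch.randn(2, 3, 4)

out = cos_nd(query, candidates)
# Compare against the built-in cosine similarity over the last axis.
ref = F.cosine_similarity(candidates, query.expand(2, 3, 4), dim=-1)
assert torch.allclose(out, ref, atol=1e-4)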

Example 13: _update_u_v

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def _update_u_v(self):
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")

        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))

        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w)) 
Author: joelmoniz, Project: DepthNets, Lines: 15, Source: spectral_normalization.py

Example 14: sn_weight

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def sn_weight(weight, u, height, n_power_iterations):
    weight.requires_grad_(False)
    for _ in range(n_power_iterations):
        v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
        u = l2normalize(torch.mv(weight.view(height, -1), v))

    weight.requires_grad_(True)
    sigma = u.dot(weight.view(height, -1).mv(v))
    return torch.div(weight, sigma), u 
Author: rdevon, Project: cortex, Lines: 11, Source: SpectralNormLayer.py

Example 15: unify_token

# Required import: import torch [as alias]
# Or: from torch import mv [as alias]
def unify_token(self, token_feature):
        """
            Unify Token Representation
        """
        window_size = self.context_window_size

        alpha_alignment = torch.zeros(token_feature.size()[0], device=token_feature.device)
        alpha_novelty = torch.zeros(token_feature.size()[0], device=token_feature.device)

        for k in range(token_feature.size()[0]):
            left_window = token_feature[k - window_size:k, :]
            right_window = token_feature[k + 1:k + window_size + 1, :]
            window_matrix = torch.cat([left_window, right_window, token_feature[k, :][None, :]])
            Q, R = torch.qr(window_matrix.T)

            r = R[:, -1]
            alpha_alignment[k] = torch.mean(self.norm_vector(R[:-1, :-1], dim=0), dim=1).matmul(R[:-1, -1]) / torch.norm(r[:-1])
            alpha_alignment[k] = 1 / (alpha_alignment[k] * window_matrix.size()[0] * 2)
            alpha_novelty[k] = torch.abs(r[-1]) / torch.norm(r)

        # Sum Norm
        alpha_alignment = alpha_alignment / torch.sum(alpha_alignment)  # Normalization Choice
        alpha_novelty = alpha_novelty / torch.sum(alpha_novelty)

        alpha = alpha_novelty + alpha_alignment
        alpha = alpha / torch.sum(alpha)  # Normalize

        out_embedding = torch.mv(token_feature.t(), alpha)
        return out_embedding 
Author: UKPLab, Project: sentence-transformers, Lines: 31, Source: WKPooling.py


Note: The torch.mv examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not repost without permission.