

Python torch.cosine_similarity Method Code Examples

This article collects typical usage examples of the Python method torch.cosine_similarity. If you are wondering what torch.cosine_similarity does or how to call it, the curated code examples below should help; you can also explore other usage examples from the torch module.


The following presents 6 code examples of torch.cosine_similarity, ordered by popularity.
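Before the project examples, here is a minimal stand-alone sketch of the call itself (tensor names and sizes are illustrative): torch.cosine_similarity(x1, x2, dim) returns the cosine of the angle between x1 and x2 along the given dimension.

import torch

a = torch.randn(4, 128)  # batch of 4 feature vectors
b = torch.randn(4, 128)

# Row-wise similarity along the feature dimension: shape (4,)
sim = torch.cosine_similarity(a, b, dim=1)

# Two flat vectors compared as wholes: a single scalar
s = torch.cosine_similarity(a[0], b[0], dim=0)
print(sim.shape, s.item())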

Example 1: layer_rotation

# Required import: import torch [as alias]
# Or: from torch import cosine_similarity [as alias]
def layer_rotation(current_params, init_params):
    """
    Example:
        >>> import netharn as nh
        >>> model = nh.models.ToyNet2d()
        >>> model2 = nh.models.ToyNet2d()
        >>> init_params = _get_named_params(model)
        >>> current_params = _get_named_params(model2)
        >>> ret = layer_rotation(current_params, init_params)

    """
    ret = []
    for (n1, p1), (n2, p2) in zip(current_params, init_params):
        assert n1 == n2, "{} vs {}".format(n1, n2)
        sim = torch.cosine_similarity(p1.reshape(-1), p2.reshape(-1), dim=0).item()
        dist = 1.0 - sim
        ret.append((n1, dist))
    return ret 
Developer: Erotemic | Project: netharn | Lines: 20 | Source: layer_rotation.py

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import cosine_similarity [as alias]
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
    reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
    rep_a, rep_b = reps

    output = torch.cosine_similarity(rep_a, rep_b)
    loss_fct = nn.MSELoss()

    if labels is not None:
        loss = loss_fct(output, labels.view(-1))
        return loss
    else:
        return reps, output
Developer: UKPLab | Project: sentence-transformers | Lines: 14 | Source: CosineSimilarityLoss.py
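For context, here is a minimal sketch of what this loss computes, using raw embedding tensors in place of the sentence-transformers model output (names and sizes are illustrative):

import torch
import torch.nn as nn

rep_a = torch.randn(16, 384)  # embeddings of 16 sentence pairs
rep_b = torch.randn(16, 384)
labels = torch.rand(16)       # gold similarity scores

output = torch.cosine_similarity(rep_a, rep_b)  # shape (16,)
loss = nn.MSELoss()(output, labels.view(-1))
print(loss.item())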

Example 3: semantic_regularization

# Required import: import torch [as alias]
# Or: from torch import cosine_similarity [as alias]
def semantic_regularization(
    features, targets, similarity_matrix, margin=None, aggreg="mean", factor=1.0, metric="cosine"
):
    pair_indexes = []

    np_targets = targets.cpu().numpy()

    for index, target in enumerate(np_targets):
        neg_indexes = np.where(np_targets != target)[0]
        neg_index = np.random.choice(neg_indexes)
        pair_indexes.append(tuple(sorted((index, neg_index))))

    pair_indexes_ = list(set(pair_indexes))
    pair_indexes = torch.tensor(pair_indexes_).long()

    left = features[pair_indexes[..., 0]]
    right = features[pair_indexes[..., 1]]
    if metric == "cosine":
        similarities = F.cosine_similarity(left, right)

        if margin is not None:
            margins = torch.ones_like(similarities) * margin
        else:
            margins = similarity_matrix[
                targets[pair_indexes[..., 0]], targets[pair_indexes[..., 1]]
            ]

        hinges = torch.clamp(similarities - margins, min=0.)

        return factor * _aggreg(hinges, aggreg, features_dim=features.shape[1])
    elif metric == "gor":
        similarities = torch.sum(torch.mul(left, right), 1)
        return factor * _aggreg(similarities, aggreg, features_dim=features.shape[1])
    elif metric == "snr":
        noise = left - right
        var_noise = noise.var(dim=1, unbiased=True)
        var_anchor = right.var(dim=1, unbiased=True)

        dist = torch.mean(var_anchor / var_noise)
        return factor * dist
    else:
        raise NotImplementedError(f"Unknown metric: {metric}.") 
Developer: arthurdouillard | Project: incremental_learning.pytorch | Lines: 43 | Source: zil.py
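The helper _aggreg is defined elsewhere in zil.py and is not shown above; the sketch below uses a hypothetical stand-in for it so the function can be exercised on dummy tensors (all names and sizes here are illustrative):

import numpy as np
import torch
import torch.nn.functional as F

# Hypothetical stand-in for the repository's _aggreg helper: reduce the
# per-pair values to a scalar (features_dim is accepted only to match the
# call signature used above).
def _aggreg(values, aggreg, features_dim):
    if aggreg == "mean":
        return values.mean()
    if aggreg == "sum":
        return values.sum()
    raise NotImplementedError(aggreg)

features = torch.randn(8, 64)
targets = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
# With an explicit margin, similarity_matrix is never indexed, so None is safe.
loss = semantic_regularization(features, targets, similarity_matrix=None, margin=0.5)
print(loss.item())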

Example 4: cluster

# Required import: import torch [as alias]
# Or: from torch import cosine_similarity [as alias]
def cluster(data, k, temp, num_iter, init = None, cluster_temp=5):
    '''
    pytorch (differentiable) implementation of soft k-means clustering.
    '''
    #normalize x so it lies on the unit sphere
    data = torch.diag(1./torch.norm(data, p=2, dim=1)) @ data
    #use kmeans++ initialization if nothing is provided
    if init is None:
        data_np = data.detach().numpy()
        norm = (data_np**2).sum(axis=1)
        # NOTE: sklearn.cluster.k_means_._k_init is a private helper that was
        # moved/renamed in newer scikit-learn releases.
        init = sklearn.cluster.k_means_._k_init(data_np, k, norm, sklearn.utils.check_random_state(None))
        init = torch.tensor(init, requires_grad=True)
        if num_iter == 0: return init
    mu = init
    n = data.shape[0]
    d = data.shape[1]
#    data = torch.diag(1./torch.norm(data, dim=1, p=2))@data
    for t in range(num_iter):
        #get distances between all data points and cluster centers
#        dist = torch.cosine_similarity(data[:, None].expand(n, k, d).reshape((-1, d)), mu[None].expand(n, k, d).reshape((-1, d))).reshape((n, k))
        dist = data @ mu.t()
        #cluster responsibilities via softmax
        r = torch.softmax(cluster_temp*dist, 1)
        #total responsibility of each cluster
        cluster_r = r.sum(dim=0)
        #mean of points in each cluster weighted by responsibility
        cluster_mean = (r.t().unsqueeze(1) @ data.expand(k, *data.shape)).squeeze(1)
        #update cluster means
        new_mu = torch.diag(1/cluster_r) @ cluster_mean
        mu = new_mu
    dist = data @ mu.t()
    r = torch.softmax(cluster_temp*dist, 1)
    return mu, r, dist 
Developer: bwilder0 | Project: clusternet | Lines: 35 | Source: models.py
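A brief usage sketch under stated assumptions: init is supplied explicitly to sidestep the private scikit-learn helper, and temp (unused by the function body) is passed as None. All names and sizes are illustrative:

import torch

torch.manual_seed(0)
data = torch.randn(100, 16)
k = 5

# Explicit initial centers: k randomly chosen rows of the data.
init = data[torch.randperm(100)[:k]].clone()
mu, r, dist = cluster(data, k=k, temp=None, num_iter=10, init=init)
print(mu.shape, r.shape)  # torch.Size([5, 16]) torch.Size([100, 5])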

Example 5: forward_gmmn

# Required import: import torch [as alias]
# Or: from torch import cosine_similarity [as alias]
def forward_gmmn(self, visual_features, semantic_features, class_id, words, metrics):
    loss = mmd(real=visual_features, fake=semantic_features, **self.gmmn_config["mmd"])

    if self.gmmn_config.get("old_mmd") and self._old_word_embeddings is not None:
        old_unseen_limit = self._n_classes - self._task_size

        if not self.gmmn_config["old_mmd"].get(
            "apply_unseen", False
        ) and class_id >= old_unseen_limit:
            return loss
        with torch.no_grad():
            old_semantic_features = self._old_word_embeddings(words)

        factor = self.gmmn_config["old_mmd"]["factor"]
        _type = self.gmmn_config["old_mmd"].get("type", "mmd")
        if _type == "mmd":
            old_loss = factor * mmd(
                real=old_semantic_features, fake=semantic_features, **self.gmmn_config["mmd"]
            )
        elif _type == "kl":
            old_loss = factor * F.kl_div(
                semantic_features, old_semantic_features, reduction="batchmean"
            )
        elif _type == "l2":
            old_loss = factor * torch.pairwise_distance(
                semantic_features, old_semantic_features, p=2
            ).mean()
        elif _type == "cosine":
            old_loss = factor * (
                1 - torch.cosine_similarity(semantic_features, old_semantic_features)
            ).mean()
        else:
            raise ValueError(f"Unknown distillation: {_type}.")

        if self.gmmn_config.get("scheduled"):
            old_loss = old_loss * math.sqrt(self._n_classes / self._task_size)

        metrics["old"] += old_loss.item()
        return loss + old_loss
    return loss
Developer: arthurdouillard | Project: incremental_learning.pytorch | Lines: 42 | Source: zil.py
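Isolated from the configuration plumbing, the "cosine" distillation branch above reduces to the following stand-alone computation (tensors and the factor value are illustrative):

import torch

factor = 0.1
semantic_features = torch.randn(32, 300)
old_semantic_features = torch.randn(32, 300)

# Mean cosine distance between current and old embeddings, scaled by factor.
old_loss = factor * (
    1 - torch.cosine_similarity(semantic_features, old_semantic_features)
).mean()
print(old_loss.item())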

Example 6: _pair_distance

# Required import: import torch [as alias]
# Or: from torch import cosine_similarity [as alias]
def _pair_distance(a, b, distance_type="l2"):
    if distance_type == "l2":
        return F.pairwise_distance(a, b, p=2)
    elif distance_type == "l2squared":
        return torch.pow(F.pairwise_distance(a, b, p=2), 2)
    elif distance_type == "l1":
        return F.pairwise_distance(a, b, p=1)
    elif distance_type == "cosine":
        return 1 - torch.cosine_similarity(a, b)

    raise ValueError("Unknown distance type {}.".format(distance_type))
Developer: arthurdouillard | Project: incremental_learning.pytorch | Lines: 13 | Source: metrics.py
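A quick usage sketch (tensor names and sizes are illustrative); each distance type returns one value per row:

import torch
import torch.nn.functional as F

a = torch.randn(8, 32)
b = torch.randn(8, 32)
for name in ("l2", "l2squared", "l1", "cosine"):
    d = _pair_distance(a, b, distance_type=name)
    print(name, d.shape)  # shape (8,) for every distance type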


Note: The torch.cosine_similarity examples above were collected from open-source projects and documentation platforms such as GitHub and MSDocs; copyright of each snippet remains with its original author. Consult the corresponding project's license before redistributing, and do not reproduce this compilation without permission.