

Python nn.CosineSimilarity Method Code Examples

This article collects typical usage examples of the torch.nn.CosineSimilarity method in Python. If you are wondering how exactly nn.CosineSimilarity is used, how to call it, or what real-world examples look like, the curated code samples below should help. You can also explore further usage examples from the torch.nn module it belongs to.


The sections below present 15 code examples of nn.CosineSimilarity, sorted by popularity by default.
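Before the project-level examples, here is a minimal, self-contained sketch of the basic API; the shapes and values are illustrative only and are not taken from any of the projects cited below:

import torch
from torch import nn

# CosineSimilarity compares two tensors along `dim`; eps guards against division by zero.
cos = nn.CosineSimilarity(dim=1, eps=1e-8)
a = torch.randn(4, 128)      # a batch of 4 vectors with 128 features each
b = torch.randn(4, 128)
scores = cos(a, b)           # shape (4,), each value in [-1, 1]
print(scores.shape)          # torch.Size([4])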

Example 1: avg_score

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def avg_score(self, inputs_d, inputs_q, mask_d, mask_q):
        attn_q = self.attn(inputs_q)
        attn_d = self.attn(inputs_d)

        q_embed = self.word_emb(inputs_q)
        d_embed = self.word_emb(inputs_d)
        
        q_embed_norm = F.normalize(q_embed, 2, 2)
        d_embed_norm = F.normalize(d_embed, 2, 2)

        mask_d = mask_d.view(mask_d.size()[0], mask_d.size()[1], 1)
        mask_q = mask_q.view(mask_q.size()[0], mask_q.size()[1], 1)

        length_q = torch.sum(mask_q, dim=1)

        avgq = torch.sum(q_embed_norm * mask_q * attn_q, 1) / length_q.view(length_q.shape[0], 1)

        length_d = torch.sum(mask_d, dim=1)
        avgd = torch.sum(d_embed_norm * mask_d * attn_d, 1) / length_d.view(length_d.shape[0], 1)
        
        pdist = nn.CosineSimilarity()

        output = pdist(avgq, avgd).unsqueeze(1)

        return output 
Developer: thunlp, Project: Kernel-Based-Neural-Ranking-Models, Lines: 27, Source: AVGPOOL.py

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def __init__(self, user_embeds, item_embeds):
        super(EmbedMatcher, self).__init__()
        self.user_embeds = user_embeds
        self.item_embeds = item_embeds
        self.similarity = nn.CosineSimilarity(dim=1) 
Developer: THUDM, Project: ScenarioMeta, Lines: 7, Source: meta.py

Example 3: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def forward(self, support_users, support_items):
        support_users = self.useritem_embeds(*support_users, is_user=True, with_neighbor=False)
        support_items = self.useritem_embeds(*support_items, is_user=False, with_neighbor=False)
        support_embeds = torch.cat((support_users, support_items), dim=1)  # (batch_size, embed_size * 2)
        support_embeds = self.support_proj(support_embeds)
        support_embeds = F.relu(support_embeds)
        _, (h, c) = self.lstm(support_embeds.unsqueeze(1))
        h = h.view(1, -1)
        user_attn = self.user_attn(h).view(self.layer_num, 2, -1)
        for i in range(self.layer_num):
            self.model.user_gcn.attn_heads[i].configure(user_attn[i])
        item_attn = self.item_attn(h).view(self.layer_num, 2, -1)
        for i in range(self.layer_num):
            self.model.item_gcn.attn_heads[i].configure(item_attn[i])

# class Matcher(nn.Module):
#     def __init__(self, user_embeds, item_embeds, support_encoder_config, query_encoder_config):
#         super(Matcher, self).__init__()
#         self.user_embeds = user_embeds
#         self.item_embeds = item_embeds
#         self.support_encoder = SupportEncoder(**support_encoder_config)
#         self.query_encoder = QueryEncoder(**query_encoder_config)
#         self.similarity = nn.CosineSimilarity(dim=1)
#
#     def forward(self, query_users, query_items, support_users, support_items):
#         """
#         :param query_users: (batch_size,)
#         :param query_items: (batch_size,)
#         :param support_users: (few_size,)
#         :param support_items: (few_size,)
#         :return: (batch_size, )
#         """
#         query_embeds = torch.cat((self.user_embeds(query_users), self.item_embeds(query_items)), dim=1)
#         support_embeds = torch.cat((self.user_embeds(support_users), self.item_embeds(support_items)), dim=1)
#         support_embeds = self.support_encoder(support_embeds)
#         query_embeds = self.query_encoder(query_embeds, support_embeds)
#         query_embeds = query_embeds.unsqueeze(-1)
#         support_embeds = support_embeds.transpose(0, 1).unsqueeze(0)
#         return self.similarity(query_embeds, support_embeds).mean(dim=1) 
Developer: THUDM, Project: ScenarioMeta, Lines: 41, Source: meta.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def __init__(self, kernel_size=1, stride=1, D=1, simfun=None):
        super(Corr1d, self).__init__()
        
        self.kernel_size = kernel_size
        self.stride = stride
        self.D = D
        if(simfun is None):
            self.simfun = self.simfun_default
        else: # such as simfun = nn.CosineSimilarity(dim=1)
            self.simfun = simfun 
Developer: wyf2017, Project: DSMnet, Lines: 12, Source: util_conv.py
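As a rough illustration of what a simfun such as nn.CosineSimilarity(dim=1) computes in this setting, the sketch below compares two feature maps along the channel axis. The shapes are assumptions chosen for illustration; this is not DSMnet's actual Corr1d.forward:

import torch
from torch import nn

simfun = nn.CosineSimilarity(dim=1)
left = torch.randn(2, 32, 8, 16)    # [N, C, H, W] features from the left image (assumed shape)
right = torch.randn(2, 32, 8, 16)   # [N, C, H, W] features from the right image
sim = simfun(left, right)           # [N, H, W] per-pixel similarity at a single disparity shift
print(sim.shape)                    # torch.Size([2, 8, 16])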

Example 5: stereo

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def stereo(self, mol_batch, mol_vec):
        stereo_cands,batch_idx = [],[]
        labels = []
        for i,mol_tree in enumerate(mol_batch):
            cands = mol_tree.stereo_cands
            if len(cands) == 1: continue
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)
            stereo_cands.extend(cands)
            batch_idx.extend([i] * len(cands))
            labels.append( (cands.index(mol_tree.smiles3D), len(cands)) )

        if len(labels) == 0: 
            return create_var(torch.zeros(1)), 1.0

        batch_idx = create_var(torch.LongTensor(batch_idx))
        stereo_cands = self.mpn(mol2graph(stereo_cands))
        stereo_cands = self.G_mean(stereo_cands)
        stereo_labels = mol_vec.index_select(0, batch_idx)
        scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)

        st,acc = 0,0
        all_loss = []
        for label,le in labels:
            cur_scores = scores.narrow(0, st, le)
            if cur_scores.data[label] >= cur_scores.max().data[0]: 
                acc += 1
            label = create_var(torch.LongTensor([label]))
            all_loss.append( self.stereo_loss(cur_scores.view(1,-1), label) )
            st += le
        all_loss = sum(all_loss) / len(labels)
        return all_loss, acc * 1.0 / len(labels) 
Developer: wengong-jin, Project: icml18-jtnn, Lines: 34, Source: jtprop_vae.py

Example 6: decode

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def decode(self, tree_vec, mol_vec, prob_decode):
        pred_root,pred_nodes = self.decoder.decode(tree_vec, prob_decode)

        #Mark nid & is_leaf & atommap
        for i,node in enumerate(pred_nodes):
            node.nid = i + 1
            node.is_leaf = (len(node.neighbors) == 1)
            if len(node.neighbors) > 1:
                set_atommap(node.mol, node.nid)

        tree_mess = self.jtnn([pred_root])[0]

        cur_mol = copy_edit_mol(pred_root.mol)
        global_amap = [{}] + [{} for node in pred_nodes]
        global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}

        cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
        if cur_mol is None: 
            return None

        cur_mol = cur_mol.GetMol()
        set_atommap(cur_mol)
        cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
        if cur_mol is None: return None

        smiles2D = Chem.MolToSmiles(cur_mol)
        stereo_cands = decode_stereo(smiles2D)
        if len(stereo_cands) == 1: 
            return stereo_cands[0]
        stereo_vecs = self.mpn(mol2graph(stereo_cands))
        stereo_vecs = self.G_mean(stereo_vecs)
        scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
        _,max_id = scores.max(dim=0)
        return stereo_cands[max_id.data[0]] 
Developer: wengong-jin, Project: icml18-jtnn, Lines: 36, Source: jtprop_vae.py

Example 7: stereo

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def stereo(self, mol_batch, mol_vec):
        stereo_cands,batch_idx = [],[]
        labels = []
        for i,mol_tree in enumerate(mol_batch):
            cands = mol_tree.stereo_cands
            if len(cands) == 1: continue
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)
            stereo_cands.extend(cands)
            batch_idx.extend([i] * len(cands))
            labels.append( (cands.index(mol_tree.smiles3D), len(cands)) )

        if len(labels) == 0: 
            return create_var(torch.zeros(1)), 1.0

        batch_idx = create_var(torch.LongTensor(batch_idx))
        stereo_cands = self.mpn(mol2graph(stereo_cands))
        stereo_cands = self.G_mean(stereo_cands)
        stereo_labels = mol_vec.index_select(0, batch_idx)
        scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)

        st,acc = 0,0
        all_loss = []
        for label,le in labels:
            cur_scores = scores.narrow(0, st, le)
            if cur_scores.data[label] >= cur_scores.max().data[0]: 
                acc += 1
            label = create_var(torch.LongTensor([label]))
            all_loss.append( self.stereo_loss(cur_scores.view(1,-1), label) )
            st += le
        #all_loss = torch.cat(all_loss).sum() / len(labels)
        all_loss = sum(all_loss) / len(labels)
        return all_loss, acc * 1.0 / len(labels) 
Developer: wengong-jin, Project: icml18-jtnn, Lines: 35, Source: jtnn_vae.py

Example 8: decode

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def decode(self, tree_vec, mol_vec, prob_decode):
        pred_root,pred_nodes = self.decoder.decode(tree_vec, prob_decode)

        #Mark nid & is_leaf & atommap
        for i,node in enumerate(pred_nodes):
            node.nid = i + 1
            node.is_leaf = (len(node.neighbors) == 1)
            if len(node.neighbors) > 1:
                set_atommap(node.mol, node.nid)

        tree_mess = self.jtnn([pred_root])[0]

        cur_mol = copy_edit_mol(pred_root.mol)
        global_amap = [{}] + [{} for node in pred_nodes]
        global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}

        cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
        if cur_mol is None: 
            return None

        cur_mol = cur_mol.GetMol()
        set_atommap(cur_mol)
        cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
        if cur_mol is None: return None
        if self.use_stereo == False:
            return Chem.MolToSmiles(cur_mol)

        smiles2D = Chem.MolToSmiles(cur_mol)
        stereo_cands = decode_stereo(smiles2D)
        if len(stereo_cands) == 1: 
            return stereo_cands[0]
        stereo_vecs = self.mpn(mol2graph(stereo_cands))
        stereo_vecs = self.G_mean(stereo_vecs)
        scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
        _,max_id = scores.max(dim=0)
        return stereo_cands[max_id.data[0]] 
Developer: wengong-jin, Project: icml18-jtnn, Lines: 38, Source: jtnn_vae.py

Example 9: max_score

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def max_score(self, inputs_d, inputs_q, mask_d, mask_q):
        q_embed = self.word_emb(inputs_q)
        d_embed = self.word_emb(inputs_d)
        
        q_embed_norm = F.normalize(q_embed, 2, 2)
        d_embed_norm = F.normalize(d_embed, 2, 2)

        mask_d = mask_d.view(mask_d.size()[0], mask_d.size()[1], 1)
        mask_q = mask_q.view(mask_q.size()[0], mask_q.size()[1], 1)

        q_embed_norm = q_embed_norm * mask_q
        d_embed_norm = d_embed_norm * mask_d

        q_embed_norm = q_embed_norm.permute(0, 2, 1)
        d_embed_norm = d_embed_norm.permute(0, 2, 1)
        
        maxop_q = nn.MaxPool1d(q_embed_norm.shape[2])
        maxq = maxop_q(q_embed_norm).squeeze()
        
        maxop_d = nn.MaxPool1d(d_embed_norm.shape[2])
        maxd = maxop_d(d_embed_norm).squeeze()
        
        pdist = nn.CosineSimilarity()

        output = pdist(maxq, maxd).unsqueeze(1)

        return output 
Developer: thunlp, Project: Kernel-Based-Neural-Ranking-Models, Lines: 29, Source: MAXPOOL.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def __init__(self, opt, num_features, dict):
        super().__init__()
        self.lt = nn.Embedding(
            num_features,
            opt['embeddingsize'],
            0,
            sparse=True,
            max_norm=opt['embeddingnorm'],
        )
        if not opt['tfidf']:
            dict = None
        self.encoder = Encoder(self.lt, dict)
        if not opt['share_embeddings']:
            self.lt2 = nn.Embedding(
                num_features,
                opt['embeddingsize'],
                0,
                sparse=True,
                max_norm=opt['embeddingnorm'],
            )
            self.encoder2 = Encoder(self.lt2, dict)
        else:
            self.encoder2 = self.encoder
        self.opt = opt
        self.softmax = nn.Softmax(dim=1)
        self.cosine = nn.CosineSimilarity()

        self.lin1 = nn.Linear(opt['embeddingsize'], opt['embeddingsize'], bias=False)
        self.lin2 = nn.Linear(opt['embeddingsize'], opt['embeddingsize'], bias=False)
        self.hops = 1
        self.lins = 0
        if 'hops' in opt:
            self.hops = opt['hops']
        if 'lins' in opt:
            self.lins = opt['lins']
        self.cosineEmbedding = True
        if opt['loss'] == 'nll':
            self.cosineEmbedding = False 
Developer: facebookresearch, Project: ParlAI, Lines: 40, Source: modules.py

Example 11: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def __init__(self, dim=1, attn='cosine', residual=False, get_weights=True):
        super().__init__()
        if attn == 'cosine':
            self.cosine = nn.CosineSimilarity(dim=dim)
        self.attn = attn
        self.dim = dim
        self.get_weights = get_weights
        self.residual = residual 
Developer: facebookresearch, Project: ParlAI, Lines: 10, Source: modules.py

Example 12: test_composite_op

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def test_composite_op(self, input_shape):
        _set_torch_reg_op("cosine_similarity", custom_cosine_similarity)
        model = nn.CosineSimilarity(dim=1, eps=1e-6)
        run_numerical_test([input_shape, input_shape], model)
        _set_torch_reg_op("cosine_similarity", default_cosine_similarity) 
Developer: apple, Project: coremltools, Lines: 7, Source: test_custom_ops.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def __init__(self, dim=1, attn='cosine', residual=False, get_weights=True):
        super().__init__()
        self.softmax = nn.Softmax(dim=dim)
        if attn == 'cosine':
            self.cosine = nn.CosineSimilarity(dim=dim)
        self.attn = attn
        self.dim = dim
        self.get_weights = get_weights
        self.residual = residual 
Developer: natashamjaques, Project: neural_chat, Lines: 11, Source: modules.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def __init__(self, weighted=False):
        super(CosineSimilarityLossWithMask, self).__init__()
        self.CosineSimilarity = nn.CosineSimilarity(dim=1)
        self.weighted = weighted 
Developer: chrisdxie, Project: uois, Lines: 6, Source: losses.py

Example 15: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import CosineSimilarity [as alias]
def forward(self, x, target, mask=None):
        """ Compute masked cosine similarity loss

            @param x: a [N x C x H x W] torch.FloatTensor of values
            @param target: a [N x C x H x W] torch.FloatTensor of values
            @param mask: a [N x H x W] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table.
                                       Could also be None
        """
        temp = .5 * (1 - self.CosineSimilarity(x, target)) # Shape: [N x H x W]. values are in [0, 1]
        if mask is None:
            return torch.sum(temp) / target.numel() # return mean

        # Compute tabletop objects mask
        binary_object_mask = (mask.clamp(0,2).long() == OBJECTS_LABEL) # Shape: [N x H x W]

        if torch.sum(binary_object_mask) > 0:
            if self.weighted:
                # Compute pixel weights
                weight_mask = torch.zeros_like(mask) # Shape: [N x H x W]. weighted mean over pixels
                unique_object_labels = torch.unique(mask)
                unique_object_labels = unique_object_labels[unique_object_labels >= 2]
                for obj in unique_object_labels:
                    num_pixels = torch.sum(mask == obj, dtype=torch.float)
                    weight_mask[mask == obj] = 1 / num_pixels # inversely proportional to number of pixels
            else:
                weight_mask = binary_object_mask.float() # mean over observed pixels
            loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) 
        else:
            print("all gradients are 0...")
            loss = torch.tensor(0., dtype=torch.float, device=x.device) # just 0. all gradients will be 0

        bg_mask = ~binary_object_mask
        if torch.sum(bg_mask) > 0:
            bg_loss = 0.1 * torch.sum(temp * bg_mask.float()) / torch.sum(bg_mask.float())
        else:
            bg_loss = torch.tensor(0., dtype=torch.float, device=x.device) # just 0

        return loss + bg_loss 
Developer: chrisdxie, Project: uois, Lines: 40, Source: losses.py
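The pattern in this loss (a per-pixel cosine distance followed by a masked mean over foreground pixels) can be reproduced in a few standalone lines. The sketch below is an approximation under the assumption that object pixels carry labels >= 2, as the docstring suggests; it is not the uois implementation itself:

import torch
from torch import nn

cos = nn.CosineSimilarity(dim=1)
N, C, H, W = 2, 3, 4, 4
x = torch.randn(N, C, H, W)
target = torch.randn(N, C, H, W)
mask = torch.randint(0, 4, (N, H, W)).float()   # 0/1 = background/table, >=2 = object labels (assumed)

temp = 0.5 * (1 - cos(x, target))               # [N, H, W], per-pixel distance in [0, 1]
fg = (mask >= 2).float()                        # foreground (object) pixels
loss = (temp * fg).sum() / fg.sum().clamp(min=1)
print(loss.item())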


Note: The torch.nn.CosineSimilarity method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors; please refer to each project's license before distributing or using it. Do not reproduce this article without permission.