This article collects typical usage examples of torch.nn.functional.cosine_similarity in Python. If you have been wondering what functional.cosine_similarity does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the torch.nn.functional module, where this function lives.
Fifteen code examples of functional.cosine_similarity are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
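Before diving into the examples, a minimal sketch of the call itself may help; the shapes and the dim argument are the usual stumbling blocks (the tensors below are random placeholders):

import torch
import torch.nn.functional as F

a = torch.randn(4, 128)  # a batch of 4 embeddings
b = torch.randn(4, 128)
sim = F.cosine_similarity(a, b, dim=1)  # one score per row, shape (4,)
print(sim.shape)  # torch.Size([4])

# For two plain 1-D vectors, compare along dim=0 to get a scalar in [-1, 1]:
u, v = torch.randn(128), torch.randn(128)
print(F.cosine_similarity(u, v, dim=0))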
Example 1: perform_verification
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def perform_verification(use_cuda, model, embeddings, enroll_speaker, test_filename, test_frames, thres):
    enroll_embedding = embeddings[enroll_speaker]
    test_embedding = get_embeddings(use_cuda, test_filename, model, test_frames)

    score = F.cosine_similarity(test_embedding, enroll_embedding)
    score = score.data.cpu().numpy()

    if score > thres:
        result = 'Accept'
    else:
        result = 'Reject'

    test_spk = test_filename.split('/')[-2].split('_')[0]
    print("\n=== Speaker verification ===")
    print("True speaker: %s\nClaimed speaker : %s\n\nResult : %s\n" % (enroll_speaker, test_spk, result))
    print("Score : %0.4f\nThreshold : %0.2f\n" % (score, thres))
Example 2: _algo_1_horiz_comp
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def _algo_1_horiz_comp(self, sent1_block_a, sent2_block_a):
    comparison_feats = []
    regM1, regM2 = [], []
    for ws in self.filter_widths:
        x1 = sent1_block_a[ws]['max'].unsqueeze(2)
        x2 = sent2_block_a[ws]['max'].unsqueeze(2)
        if np.isinf(ws):
            x1 = x1.expand(-1, self.n_holistic_filters, -1)
            x2 = x2.expand(-1, self.n_holistic_filters, -1)
        regM1.append(x1)
        regM2.append(x2)

    regM1 = torch.cat(regM1, dim=2)
    regM2 = torch.cat(regM2, dim=2)

    # Cosine similarity
    comparison_feats.append(F.cosine_similarity(regM1, regM2, dim=2))

    # Euclidean distance
    pairwise_distances = []
    for x1, x2 in zip(regM1, regM2):
        dist = F.pairwise_distance(x1, x2).view(1, -1)
        pairwise_distances.append(dist)
    comparison_feats.append(torch.cat(pairwise_distances))

    return torch.cat(comparison_feats, dim=1)
Example 3: linear_motion_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def linear_motion_loss(outputs, mask):
    # batch_size = outputs.shape[0]
    s_len = outputs.shape[1]

    loss = outputs.new_zeros(1)
    for idx in range(2, s_len, 1):
        # mask loss to valid outputs
        # motion_mask: (B, 1), the mask of the current frame
        motion_mask = mask[:, idx].view(mask.shape[0], 1)

        # Loss: |(loc_t - loc_{t-1}) - (loc_{t-1} - loc_{t-2})|_1 for t in [2, s_len)
        # If loc_t is empty, mask it out via motion_mask
        curr_motion = (outputs[:, idx] - outputs[:, idx - 1]) * motion_mask
        past_motion = (outputs[:, idx - 1] - outputs[:, idx - 2]) * motion_mask
        loss += torch.mean(1.0 - F.cosine_similarity(past_motion, curr_motion))
        loss += F.l1_loss(past_motion, curr_motion)
    return loss / (torch.sum(mask))
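The 1.0 - F.cosine_similarity(...) term above penalizes changes of direction between consecutive displacement vectors; a toy check with hand-made motions (values invented for illustration):

import torch
import torch.nn.functional as F

same_dir = torch.tensor([[1.0, 0.0]])
turn_90 = torch.tensor([[0.0, 1.0]])
print(1.0 - F.cosine_similarity(same_dir, same_dir))  # tensor([0.]): no penalty
print(1.0 - F.cosine_similarity(same_dir, turn_90))   # tensor([1.]): penalized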
Example 4: embedding_cosine
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def embedding_cosine(self, src_embedding, table_embedding, table_unk_mask):
    embedding_differ = []
    for i in range(table_embedding.size(1)):
        one_table_embedding = table_embedding[:, i, :]
        one_table_embedding = one_table_embedding.unsqueeze(1).expand(table_embedding.size(0),
                                                                      src_embedding.size(1),
                                                                      table_embedding.size(2))
        topk_val = F.cosine_similarity(one_table_embedding, src_embedding, dim=-1)
        embedding_differ.append(topk_val)

    embedding_differ = torch.stack(embedding_differ).transpose(1, 0)
    embedding_differ.data.masked_fill_(table_unk_mask.unsqueeze(2).expand(
        table_embedding.size(0),
        table_embedding.size(1),
        embedding_differ.size(2)
    ).bool(), 0)
    return embedding_differ
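In recent PyTorch versions F.cosine_similarity broadcasts its inputs, so the per-column Python loop above can usually be collapsed into a single call; a sketch with invented shapes (batch 2, 5 source tokens, 3 table entries, hidden size 8):

import torch
import torch.nn.functional as F

src_embedding = torch.randn(2, 5, 8)
table_embedding = torch.randn(2, 3, 8)
# (2, 3, 1, 8) against (2, 1, 5, 8) broadcasts to (2, 3, 5, 8), reduced over dim=-1
sim = F.cosine_similarity(table_embedding.unsqueeze(2),
                          src_embedding.unsqueeze(1), dim=-1)
print(sim.shape)  # torch.Size([2, 3, 5]), matching the stacked/transposed loop result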
Example 5: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def forward(self, repres, max_att):
    """
    Args:
        repres  - [bsz, a_len|q_len, cont_dim]
        max_att - [bsz, q_len|a_len, cont_dim]
    Returns:
        size - [bsz, sentence_len, mp_dim]
    """
    bsz = repres.size(0)
    sent_len = repres.size(1)

    repres = repres.view(-1, self.cont_dim)
    max_att = max_att.view(-1, self.cont_dim)
    repres = multi_perspective_expand_for_2D(repres, self.weight)
    max_att = multi_perspective_expand_for_2D(max_att, self.weight)
    temp = cosine_similarity(repres, max_att, repres.dim() - 1)

    return temp.view(bsz, sent_len, self.mp_dim)
Example 6: worker_cos_reward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def worker_cos_reward(self, feature_array, goal_array):
    """
    Get reward for worker (cosine similarity between feature deltas and goals)
    :return: cos_loss: batch_size * seq_len
    """
    for i in range(int(self.max_seq_len / self.step_size)):
        real_feature = feature_array[:, i * self.step_size, :].unsqueeze(1).expand((-1, self.step_size, -1))
        feature_array[:, i * self.step_size:(i + 1) * self.step_size, :] = real_feature
        if i > 0:
            sum_goal = torch.sum(goal_array[:, (i - 1) * self.step_size:i * self.step_size, :], dim=1, keepdim=True)
        else:
            sum_goal = goal_array[:, 0, :].unsqueeze(1)
        goal_array[:, i * self.step_size:(i + 1) * self.step_size, :] = sum_goal.expand((-1, self.step_size, -1))

    offset_feature = feature_array[:, 1:, :]  # f_{t+1}, batch_size * seq_len * goal_out_size
    goal_array = goal_array[:, :self.max_seq_len, :]  # batch_size * seq_len * goal_out_size
    sub_feature = offset_feature - goal_array

    # L2 normalization
    sub_feature = F.normalize(sub_feature, p=2, dim=-1)
    all_goal = F.normalize(goal_array, p=2, dim=-1)

    cos_loss = F.cosine_similarity(sub_feature, all_goal, dim=-1)  # batch_size * seq_len
    return cos_loss
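Note that cosine similarity is scale-invariant, so the explicit L2 normalization above does not change the value F.cosine_similarity returns; a quick check:

import torch
import torch.nn.functional as F

x, y = torch.randn(4, 16), torch.randn(4, 16)
raw = F.cosine_similarity(x, y, dim=-1)
normed = F.cosine_similarity(F.normalize(x, p=2, dim=-1),
                             F.normalize(y, p=2, dim=-1), dim=-1)
print(torch.allclose(raw, normed, atol=1e-6))  # True, up to eps handling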
Example 7: model_cosine_similarity
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def model_cosine_similarity(self, model, target_params_variables,
                            model_id='attacker'):
    cs_list = list()
    cs_loss = torch.nn.CosineSimilarity(dim=0)
    for name, data in model.named_parameters():
        if name == 'decoder.weight':
            continue

        model_update = 100 * (data.view(-1) - target_params_variables[name].view(-1)) + target_params_variables[name].view(-1)

        cs = F.cosine_similarity(model_update,
                                 target_params_variables[name].view(-1), dim=0)
        # logger.info(torch.equal(layer.view(-1),
        #                         target_params_variables[name].view(-1)))
        # logger.info(name)
        # logger.info(cs.data[0])
        # logger.info(torch.norm(model_update).data[0])
        # logger.info(torch.norm(fake_weights[name]))
        cs_list.append(cs)

    cos_los_submit = 1 * (1 - sum(cs_list) / len(cs_list))
    logger.info(model_id)
    logger.info((sum(cs_list) / len(cs_list)).data[0])
    return 1e3 * sum(cos_los_submit)
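The pattern here, flattening each parameter tensor with .view(-1) and comparing along dim=0, yields one similarity score per layer; a minimal sketch with a toy model (the two Linear layers are stand-ins for the real model and target parameters):

import torch
import torch.nn.functional as F

model_a = torch.nn.Linear(8, 4)
model_b = torch.nn.Linear(8, 4)
for (name, p_a), (_, p_b) in zip(model_a.named_parameters(),
                                 model_b.named_parameters()):
    cs = F.cosine_similarity(p_a.view(-1), p_b.view(-1), dim=0)
    print(name, cs.item())  # one score per parameter tensor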
Example 8: base
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def base(self, x, y, z):
    # base_y = self.basenet(y)
    # if random.random() > .5:  # TODO Debug, make sure order doesn't matter
    #     base_x = self.basenet(x)
    #     base_z = self.basenet(z)
    # else:
    #     base_z = self.basenet(z)
    #     base_x = self.basenet(x)
    base_x = self.basenet(x)
    base_y = self.basenet(y)
    base_z = self.basenet(z)

    if self.distance == 'cosine':
        dist_a = .5 - .5 * F.cosine_similarity(base_x, base_y, 1, 1e-6).view(-1)
        dist_b = .5 - .5 * F.cosine_similarity(base_y, base_z, 1, 1e-6).view(-1)
    elif self.distance == 'l2':
        dist_a = F.pairwise_distance(base_x, base_y, 2).view(-1)
        dist_b = F.pairwise_distance(base_y, base_z, 2).view(-1)
    else:
        assert False, "Wrong args.distance"

    print('fc7 norms:', base_x.norm().item(), base_y.norm().item(), base_z.norm().item())
    print('pairwise dist means:', dist_a.mean().item(), dist_b.mean().item())
    return base_x, base_y, base_z, dist_a, dist_b
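The .5 - .5 * cos transform used above maps cosine similarity from [-1, 1] onto a distance in [0, 1]: 0 for identical directions, 1 for opposite ones. A quick check:

import torch
import torch.nn.functional as F

x = torch.tensor([[1.0, 0.0]])
print(0.5 - 0.5 * F.cosine_similarity(x, x, 1, 1e-6))   # tensor([0.])
print(0.5 - 0.5 * F.cosine_similarity(x, -x, 1, 1e-6))  # tensor([1.])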
Example 9: _algo_2_vert_comp
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a, sent1_block_b, sent2_block_b):
    comparison_feats = []
    ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
    for pool in ('max', 'min', 'mean'):
        for ws1 in self.filter_widths:
            x1 = sent1_block_a[ws1][pool]
            for ws2 in self.filter_widths:
                x2 = sent2_block_a[ws2][pool]
                if (not np.isinf(ws1) and not np.isinf(ws2)) or (np.isinf(ws1) and np.isinf(ws2)):
                    comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                    comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                    comparison_feats.append(torch.abs(x1 - x2))

    for pool in ('max', 'min'):
        for ws in ws_no_inf:
            oG_1B = sent1_block_b[ws][pool]
            oG_2B = sent2_block_b[ws][pool]
            for i in range(0, self.n_per_dim_filters):
                x1 = oG_1B[:, :, i]
                x2 = oG_2B[:, :, i]
                comparison_feats.append(F.cosine_similarity(x1, x2).unsqueeze(1))
                comparison_feats.append(F.pairwise_distance(x1, x2).unsqueeze(1))
                comparison_feats.append(torch.abs(x1 - x2))

    return torch.cat(comparison_feats, dim=1)
Example 10: cosine_similary
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def cosine_similary(self, item):
    text = item[1]
    words = text.split()
    vector = [0] * len(self.feature_vector)
    for word in words:
        if word not in self.feature_idx:
            self.feature_idx[word] = len(self.feature_vector)
            self.feature_vector.append(0)
            vector.append(1)
        else:
            while len(vector) <= self.feature_idx[word]:
                vector.append(0)
                self.feature_vector.append(0)
            vector[self.feature_idx[word]] += 1

    item_tensor = torch.FloatTensor(vector)
    cluster_tensor = torch.FloatTensor(self.feature_vector)
    similarity = F.cosine_similarity(item_tensor, cluster_tensor, 0)
    # Alternative: `F.pairwise_distance()`, but normalize the cluster first
    return similarity.item()  # item() converts a one-element tensor to a Python float
Example 11: similarity
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def similarity(self, code_vec, desc_vec):
    """
    https://arxiv.org/pdf/1508.01585.pdf
    """
    assert self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd'], "invalid similarity measure"
    if self.conf['sim_measure'] == 'cos':
        return F.cosine_similarity(code_vec, desc_vec)
    elif self.conf['sim_measure'] == 'poly':
        return (0.5 * torch.matmul(code_vec, desc_vec.t()).diag() + 1) ** 2
    elif self.conf['sim_measure'] == 'sigmoid':
        return torch.tanh(torch.matmul(code_vec, desc_vec.t()).diag() + 1)
    elif self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']:
        euc_dist = torch.dist(code_vec, desc_vec, 2)  # or torch.norm(code_vec - desc_vec, 2)
        euc_sim = 1 / (1 + euc_dist)
        if self.conf['sim_measure'] == 'euc':
            return euc_sim
        sigmoid_sim = torch.sigmoid(torch.matmul(code_vec, desc_vec.t()).diag() + 1)
        if self.conf['sim_measure'] == 'gesd':
            return euc_sim * sigmoid_sim
        elif self.conf['sim_measure'] == 'aesd':
            return 0.5 * (euc_sim + sigmoid_sim)
Example 12: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def forward(self, d1_r, d1_c, d1_l, d2_r, d2_c, d2_l):
    siamese_embed1 = self.model_siamese(d1_r)
    siamese_embed2 = self.model_siamese(d2_r)
    outputs = [siamese_embed1, siamese_embed2]

    if self.dist_fn == "concat":
        output_concat = self.model_concat(siamese_embed1, siamese_embed2)
        output = output_concat
        outputs.append(output)
    elif self.dist_fn == "cos":
        output_cos = F.cosine_similarity(siamese_embed1 + 1e-16, siamese_embed2 + 1e-16, dim=-1)
        output = output_cos
        outputs.append(output)
    elif self.dist_fn == "widedeep":
        output_widedeep = self.model_widedeep(siamese_embed1, siamese_embed2, d1_c, d2_c)
        output = output_widedeep
        outputs.append(output)

    return outputs
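The + 1e-16 nudge guards against all-zero embeddings producing NaN; F.cosine_similarity also exposes an eps argument (default 1e-8) that clamps the norms for the same purpose, so a sketch of the alternative:

import torch
import torch.nn.functional as F

a = torch.zeros(2, 8)  # degenerate all-zero embeddings
b = torch.randn(2, 8)
print(F.cosine_similarity(a, b, dim=-1, eps=1e-8))  # tensor([0., 0.]), no NaN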
Example 13: matching_strategy_full
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def matching_strategy_full(self, v1, v2, W):
    """
    :param v1: batch x seq_len x n_hidden
    :param v2: batch x n_hidden (FULL) or batch x seq_len x n_hidden (ATTENTIVE)
    :param W: l x n_hidden
    :return: batch x seq_len x l
    """
    l = W.size(0)
    batch_size = v1.size(0)
    seq_len = v1.size(1)

    v1 = v1.unsqueeze(2).expand(-1, -1, l, -1)  # batch x seq_len x l x n_hidden
    W_expanded = W.expand(batch_size, seq_len, -1, -1)  # batch x seq_len x l x n_hidden
    Wv1 = W_expanded.mul(v1)  # batch x seq_len x l x n_hidden

    if len(v2.size()) == 2:
        v2 = v2.unsqueeze(1).unsqueeze(1).expand(-1, seq_len, l, -1)  # batch x seq_len x l x n_hidden
    elif len(v2.size()) == 3:
        v2 = v2.unsqueeze(2).expand(-1, -1, l, -1)  # batch x seq_len x l x n_hidden
    else:
        raise ValueError(f'Invalid v2 tensor size {v2.size()}')
    Wv2 = W_expanded.mul(v2)

    cos_sim = F.cosine_similarity(Wv1, Wv2, dim=3)
    return cos_sim
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def forward(self, query_users, query_items, support_users, support_items):
    """
    :param query_users: (batch_size,)
    :param query_items: (batch_size,)
    :param support_users: (few_size,)
    :param support_items: (few_size,)
    :return: (batch_size,)
    """
    query_users, query_items = self.user_embeds(query_users), self.item_embeds(query_items)
    support_users, support_items = self.user_embeds(support_users), self.item_embeds(support_items)
    similarity = F.softmax(F.cosine_similarity(query_users.unsqueeze(-1), support_users.t().unsqueeze(0)), dim=1)
    item_embeds = torch.matmul(similarity, support_items)
    return F.cosine_similarity(query_items, item_embeds, dim=1)
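The unsqueeze/transpose dance in the forward pass computes a full batch x few similarity matrix in one call, reducing over the embedding axis (the default dim=1) with broadcasting; a shape check with invented sizes:

import torch
import torch.nn.functional as F

query = torch.randn(4, 16)    # (batch_size, emb_dim)
support = torch.randn(3, 16)  # (few_size, emb_dim)
sim = F.cosine_similarity(query.unsqueeze(-1),       # (4, 16, 1)
                          support.t().unsqueeze(0))  # (1, 16, 3)
print(sim.shape)  # torch.Size([4, 3])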
Example 15: distance_metric
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import cosine_similarity [as alias]
def distance_metric(student, teacher, option, weights=None):
    """Distance metric to calculate the imitation loss.

    Args:
        student: batch_size x n_classes
        teacher: batch_size x n_classes
        option: one of [cosine, l2, l1, kl]
        weights: batch_size or float
    Returns:
        The computed distance metric.
    """
    if option == 'cosine':
        dists = 1 - F.cosine_similarity(student, teacher.detach(), dim=1)
    elif option == 'l2':
        dists = (student - teacher.detach()).pow(2).sum(1)
    elif option == 'l1':
        dists = torch.abs(student - teacher.detach()).sum(1)
    elif option == 'kl':
        assert weights is None
        T = 8
        # averaged for each minibatch
        dist = F.kl_div(
            F.log_softmax(student / T, dim=1),
            F.softmax(teacher.detach() / T, dim=1)) * (T * T)
        return dist
    else:
        raise NotImplementedError

    if weights is None:
        dist = dists.mean()
    else:
        dist = (dists * weights).mean()
    return dist
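A toy run of the cosine option, with random logits standing in for the student and teacher outputs:

import torch
import torch.nn.functional as F

student = torch.randn(8, 10)  # batch_size x n_classes, made-up logits
teacher = torch.randn(8, 10)
dists = 1 - F.cosine_similarity(student, teacher.detach(), dim=1)
print(dists.mean().item())  # the unweighted imitation loss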