

Python torch.dist Method Code Examples

This article collects typical usage examples of the torch.dist method in Python. If you are wondering what torch.dist does, how it is called in practice, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of the torch module that this method belongs to.


A total of 15 code examples of the torch.dist method are shown below, sorted by popularity by default.
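Before the examples, here is a minimal orientation sketch (not taken from any of the projects below) of what torch.dist computes: the p-norm of the elementwise difference between two tensors, returned as a 0-dimensional tensor.

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 6.0, 3.0])

print(torch.dist(a, b))       # default p=2: same as torch.norm(a - b), prints tensor(5.)
print(torch.dist(a, b, p=1))  # L1 distance, prints tensor(7.)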

Example 1: get_closest

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def get_closest(target_word: str, word_to_idx: Dict, embeddings: torch.Tensor, n: int = 5) -> List[Tuple[str, torch.Tensor]]:
    """
    Get the n closest
    words to your word.
    """

    # Calculate distances to all other words

    word_embedding = embeddings[word_to_idx[target_word.lower()]]
    distances = []
    for word, index in word_to_idx.items():
        if word == "<MASK>" or word == target_word:
            continue
        distances.append((word, torch.dist(word_embedding, embeddings[index])))

    results = sorted(distances, key=lambda x: x[1])[1:n + 2]
    return results 
Developer: feedly, Project: transfer-nlp, Lines of code: 19, Source: utils.py

Example 2: calc_dists

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def calc_dists(preds, target, normalize, use_zero=False):
    preds = preds.float()
    target = target.float()
    normalize = normalize.float()
    dists = torch.zeros(preds.size(1), preds.size(0))
    if use_zero:
        boundary = 0
    else:
        boundary = 1
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n,c,0] > boundary and target[n, c, 1] > boundary:
                dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
            else:
                dists[c, n] = -1
    return dists 
Developer: zhiqiangdon, Project: CU-Net, Lines of code: 18, Source: Evaluation_prev_version.py

Example 3: batch_euclidean_dist

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def batch_euclidean_dist(x, y):
    """
    Args:
        x: pytorch Variable, with shape [N, m, d]
        y: pytorch Variable, with shape [N, n, d]
    Returns:
        dist: pytorch Variable, with shape [N, m, n]
    """
    assert len(x.size()) == 3
    assert len(y.size()) == 3
    assert x.size(0) == y.size(0)
    assert x.size(-1) == y.size(-1)

    N, m, d = x.size()
    N, n, d = y.size()

    # shape [N, m, n]
    xx = torch.pow(x, 2).sum(-1, keepdim=True).expand(N, m, n)
    yy = torch.pow(y, 2).sum(-1, keepdim=True).expand(N, n, m).permute(0, 2, 1)
    dist = xx + yy
    dist.baddbmm_(1, -2, x, y.permute(0, 2, 1))
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist 
Developer: yujheli, Project: ARN, Lines of code: 25, Source: reid_loss.py
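Note that the positional beta/alpha form of baddbmm_ used above is deprecated and is rejected by newer PyTorch releases. Assuming a reasonably recent PyTorch version that provides torch.cdist, a minimal equivalent sketch (not the original author's code) is:

import torch

def batch_euclidean_dist_cdist(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # x: [N, m, d], y: [N, n, d] -> pairwise distances of shape [N, m, n]
    assert x.dim() == 3 and y.dim() == 3
    assert x.size(0) == y.size(0) and x.size(-1) == y.size(-1)
    return torch.cdist(x, y, p=2)

x = torch.randn(4, 5, 8)
y = torch.randn(4, 7, 8)
print(batch_euclidean_dist_cdist(x, y).shape)  # torch.Size([4, 5, 7])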

Example 4: get_obs

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def get_obs(Asymm, H, Sx, Sy, Sz, C, E ):
    # A(phy,u,l,d,r), C(d,r), E(u,r,d)
    
    Da = Asymm.size()
    Td = torch.einsum('mefgh,nabcd->eafbgchdmn',(Asymm,Asymm)).contiguous().view(Da[1]**2, Da[2]**2, Da[3]**2, Da[4]**2, Da[0], Da[0])
    #print( torch.dist( Td, Td.permute(0,3,2,1,4,5) ) )    # test left-right reflection symmetry of Td

    CE = torch.tensordot(C,E,([1],[0]))         # C(1d)E(dga)->CE(1ga)
    EL = torch.tensordot(E,CE,([2],[0]))        # E(2e1)CE(1ga)->EL(2ega)  use E(2e1) == E(1e2) 
    EL = torch.tensordot(EL,Td,([1,2],[1,0]))   # EL(2ega)T(gehbmn)->EL(2ahbmn)
    EL = torch.tensordot(EL,CE,([0,2],[0,1]))   # EL(2ahbmn)CE(2hc)->EL(abmnc), use CE(2hc) == CE(1ga) 
    Rho = torch.tensordot(EL,EL,([0,1,4],[0,1,4])).permute(0,2,1,3).contiguous().view(Da[0]**2,Da[0]**2)
    
    # print( (Rho-Rho.t()).norm() )
    Rho = 0.5*(Rho + Rho.t())
    
    Tnorm = Rho.trace()
    Energy = torch.mm(Rho,H).trace()/Tnorm
    Mx = torch.mm(Rho,Sx).trace()/Tnorm
    My = torch.mm(Rho,Sy).trace()/Tnorm
    Mz = torch.mm(Rho,Sz).trace()/Tnorm
   
    #print("Tnorm = %g, Energy = %g " % (Tnorm.item(), Energy.item()) )

    return Energy, Mx, My, Mz 
Developer: wangleiphy, Project: tensorgrad, Lines of code: 27, Source: measure.py

Example 5: test

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def test(model):
    model.eval()
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.zeros(1,299,299,3)
    inputs[0] = torch.from_numpy(img)
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)
    # 1, 3, 299, 299
    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Developer: fastai, Project: imagenet-fast, Lines of code: 18, Source: inceptionv4.py
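A caveat when running this test on a current environment: scipy.misc.imread was deprecated and later removed from SciPy, so the image load above would fail. A minimal replacement sketch using Pillow (assuming it is installed) is:

import numpy as np
from PIL import Image

# Stand-in for the removed scipy.misc.imread call above
img = np.asarray(Image.open('lena_299.png'))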

Example 6: test_conv2d

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
    output_tf_conv.transpose_(1,3)
    output_tf_conv.transpose_(2,3)
    output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
    output_tf_relu.transpose_(1,3)
    output_tf_relu.transpose_(2,3)
    h5f.close()
    def test_dist_conv(self, input, output):
        print(name, 'conv', torch.dist(output.data, output_tf_conv))
    module.conv.register_forward_hook(test_dist_conv)
    def test_dist_relu(self, input, output):
        print(name, 'relu', torch.dist(output.data, output_tf_relu))
    module.relu.register_forward_hook(test_dist_relu) 
Developer: fastai, Project: imagenet-fast, Lines of code: 18, Source: inceptionresnetv2.py

Example 7: similarity

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def similarity(self, code_vec, desc_vec):
        """
        https://arxiv.org/pdf/1508.01585.pdf 
        """
        assert self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd'], "invalid similarity measure"
        if self.conf['sim_measure']=='cos':
            return F.cosine_similarity(code_vec, desc_vec)
        elif self.conf['sim_measure']=='poly':
            return (0.5*torch.matmul(code_vec, desc_vec.t()).diag()+1)**2
        elif self.conf['sim_measure']=='sigmoid':
            return torch.tanh(torch.matmul(code_vec, desc_vec.t()).diag()+1)
        elif self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']:
            euc_dist = torch.dist(code_vec, desc_vec, 2) # or torch.norm(code_vec-desc_vec,2)
            euc_sim = 1 / (1 + euc_dist)
            if self.conf['sim_measure']=='euc': return euc_sim                
            sigmoid_sim = torch.sigmoid(torch.matmul(code_vec, desc_vec.t()).diag()+1)
            if self.conf['sim_measure']=='gesd': 
                return euc_sim * sigmoid_sim
            elif self.conf['sim_measure']=='aesd':
                return 0.5*(euc_sim+sigmoid_sim) 
Developer: guxd, Project: deep-code-search, Lines of code: 22, Source: jointemb.py

Example 8: peste_distance

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def peste_distance(self) -> np.ndarray:
        """Calculates the euclidean distance between pixels of two different arrays
        on a vector of observations, and normalizes the result applying the relativize function.
        In a more general scenario, any function that quantifies the notion of "how different two
        observations are" could work, even if it is not a proper distance.
        """
        # Get random companion
        peste_obs = self.get_peste_obs()
        # Euclidean distance between states (pixels / RAM)
        # obs = self.observations.astype(np.float32).reshape((self.n_walkers, -1))
        dist = self.wasserstein_distance(np.array(self.observations), peste_obs)
        return relativize_vector(dist) 
Developer: Guillemdb, Project: FractalAI, Lines of code: 14, Source: dnn_train.py

Example 9: inverse_distance

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def inverse_distance(h, h_i, epsilon=1e-3):
  return 1 / (torch.dist(h, h_i) + epsilon) 
Developer: mjacar, Project: pytorch-nec, Lines of code: 4, Source: math_utils.py

Example 10: update

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def update(self):
    """
    Iterate through the transition queue and make NEC updates
    """
    for t in range(len(self.transition_queue)):
      transition = self.transition_queue[t]
      state = Variable(Tensor(transition.state)).unsqueeze(0)
      action = transition.action
      state_embedding = self.embedding_network(move_to_gpu(state))
      dnd = self.dnd_list[action]

      Q_N = move_to_gpu(self.Q_lookahead(t))
      embedding_index = dnd.get_index(state_embedding)
      if embedding_index is None:
        dnd.insert(state_embedding.detach(), Q_N.detach().unsqueeze(0))
      else:
        Q = self.Q_update(dnd.values[embedding_index], Q_N)
        dnd.update(Q.detach(), embedding_index)
      self.replay_memory.push(transition.state, action,
                              move_to_gpu(Q_N.detach()))

    [dnd.commit_insert() for dnd in self.dnd_list]

    for t in range(len(self.transition_queue)):
      if t % self.update_period == 0 or t == len(self.transition_queue) - 1:
        # Train on random mini-batch from self.replay_memory
        batch = self.replay_memory.sample(self.batch_size)
        actual = torch.cat([sample.Q_N for sample in batch])
        predicted = torch.cat([self.dnd_list[sample.action].lookup(self.embedding_network(move_to_gpu(
            Variable(Tensor(sample.state))).unsqueeze(0)), update_flag=True) for sample in batch])
        loss = torch.dist(actual, move_to_gpu(predicted))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        [dnd.update_params() for dnd in self.dnd_list]

    # Clear out transition queue
    self.transition_queue = [] 
Developer: mjacar, Project: pytorch-nec, Lines of code: 40, Source: nec_agent.py

Example 11: approx_PCKh

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def approx_PCKh(pred, target, idxs, res):
    # pred: b x n x 2 tensor
    # target: b x n x 2 tensor
    assert(pred.size()==target.size())
    target = target.float()
    # distances between prediction and groundtruth coordinates
    dists = torch.zeros((pred.size(1), pred.size(0)))
    normalize = res/10
    for i in range(pred.size(1)):
        for j in range(pred.size(0)):
            if target[j][i][0] > 0 and target[j][i][1] > 0:
                dists[i][j] = torch.dist(target[j][i], pred[j][i]) / normalize
            else:
                dists[i][j] = -1
    # accuracies based on the distances
    threshold = 0.5
    avg_acc = 0
    bad_idx_count = 0
    for i in range(len(idxs)):
        per_joint_dists = dists[idxs[i]]
        if torch.ne(per_joint_dists, -1).sum() > 0:
            valid_count = per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum()
            all_count = per_joint_dists.ne(-1).sum()
            # print(valid_count)
            # print(type(valid_count))
            # exit()
            per_joint_acc = float(valid_count) / float(all_count)
            # print(per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum())
            # print('joint {0} accuracy is {1}' .format(idxs[i]+1, per_joint_acc))
        else:
            per_joint_acc = -1
        if per_joint_acc >= 0:
            avg_acc += per_joint_acc
        else:
            bad_idx_count += 1
    avg_acc = avg_acc / (len(idxs)-bad_idx_count)
    # exit()
    return avg_acc 
Developer: zhiqiangdon, Project: CU-Net, Lines of code: 40, Source: HumanAcc.py

Example 12: forward

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def forward(self, re_img, gt_img):
        p = 2
        if self.dist_metric == 'L1':
            p = 1
        b,c,h,w = gt_img.size()
        loss = torch.dist(re_img, gt_img, p=p) / (b*h*w)
        return loss 
Developer: yujheli, Project: ARN, Lines of code: 9, Source: reid_loss.py

Example 13: euclidean_dist

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def euclidean_dist(x, y):
    """
    Args:
        x: pytorch Variable, with shape [m, d]
        y: pytorch Variable, with shape [n, d]
    Returns:
        dist: pytorch Variable, with shape [m, n]
    """
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist.addmm_(1, -2, x, y.t())
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist 
Developer: yujheli, Project: ARN, Lines of code: 17, Source: reid_loss.py
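As in Example 3, the positional beta/alpha call dist.addmm_(1, -2, x, y.t()) relies on a deprecated signature that newer PyTorch versions reject. A minimal sketch of the same computation using the keyword-argument form (an adaptation, not the original author's code) is:

import torch

def euclidean_dist_kw(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # x: [m, d], y: [n, d] -> pairwise distances of shape [m, n]
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist = dist.addmm(x, y.t(), beta=1, alpha=-2)  # dist - 2 * x @ y.t()
    return dist.clamp(min=1e-12).sqrt()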

Example 14: _one_distance

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def _one_distance(ws1, ws2):
        dist = 0
        for w1, w2 in zip(ws1, ws2):
            dist += np.sqrt(np.sum((w1 - w2) ** 2))
        return dist 
Developer: Guillemdb, Project: FractalAI, Lines of code: 7, Source: dnn_train.py

Example 15: evaluate_distance

# Required import: import torch [as alias]
# Or: from torch import dist [as alias]
def evaluate_distance(self) -> np.ndarray:
        """Calculates the euclidean distance between pixels of two different arrays
        on a vector of observations, and normalizes the result applying the relativize function.
        In a more general scenario, any function that quantifies the notion of "how different two
        observations are" could work, even if it is not a proper distance.
        """

        # Get random companion
        idx = np.random.permutation(np.arange(self.n_walkers, dtype=int))
        # Euclidean distance between states (pixels / RAM)
        obs = self.observations.astype(np.float32)
        dist = self.wasserstein_distance(obs[idx], obs)  # ** 2
        return relativize_vector(dist) 
Developer: Guillemdb, Project: FractalAI, Lines of code: 15, Source: dnn_train.py


Note: The torch.dist examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.