

Python functional.pairwise_distance Method Code Examples

This article collects typical usage examples of the torch.nn.functional.pairwise_distance method in Python. If you have been wondering how functional.pairwise_distance is used in practice, how to call it, or what real-world examples look like, the curated code samples below should help. You can also explore further usage examples of torch.nn.functional, the module this method belongs to.


The following presents 15 code examples of the functional.pairwise_distance method, sorted by popularity by default.
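
Before the examples, here is a minimal sketch of the basic call. Note that pairwise_distance compares corresponding rows of its two inputs and returns one distance per row; it does not build a full N x M distance matrix. The tensor shapes below are illustrative.

import torch
import torch.nn.functional as F

x1 = torch.randn(4, 128)               # 4 embeddings of dimension 128
x2 = torch.randn(4, 128)               # 4 embeddings compared row by row against x1
d = F.pairwise_distance(x1, x2, p=2)   # Euclidean distance per row pair
print(d.shape)                         # torch.Size([4])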

Example 1: kmeans

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def kmeans(input, n_clusters=16, tol=1e-6):
  """
  Batched k-means; `input` is assumed to have shape (batch, features, points),
  and the returned centroids have shape (batch, features, n_clusters).
  Assumes: import numpy as np; import torch; from torch.nn import functional as func.
  """
  # initialise the centroids with randomly chosen input points
  indices = torch.from_numpy(
    np.random.choice(input.size(-1), n_clusters, replace=False)).long()
  values = input[:, :, indices]

  while True:
    old_values = values
    new_values = []
    for b in range(input.size(0)):
      points = input[b].t()                    # (points, features)
      centroids = values[b].t()                # (n_clusters, features)
      n_points = points.size(0)
      # pairwise_distance compares rows, so tile points against centroids
      # to obtain a (points, n_clusters) distance matrix
      dist = func.pairwise_distance(
        points.unsqueeze(1).expand(-1, n_clusters, -1).reshape(-1, points.size(1)),
        centroids.unsqueeze(0).expand(n_points, -1, -1).reshape(-1, points.size(1))
      ).view(n_points, n_clusters)
      choice_cluster = torch.argmin(dist, dim=1)
      # recompute each centroid as the mean of its assigned points,
      # keeping the previous centroid when a cluster receives no points
      centroids = torch.stack([
        points[choice_cluster == k].mean(dim=0)
        if (choice_cluster == k).any() else centroids[k]
        for k in range(n_clusters)
      ])
      new_values.append(centroids.t())
    values = torch.stack(new_values)
    shift = (old_values - values).norm(dim=1)
    if shift.max() ** 2 < tol:
      break

  return values 
Developer: mjendrusch | Project: torchsupport | Source file: fast_gaussian.py
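
A hypothetical invocation of the kmeans helper above; the (batch, features, points) layout is an assumption inferred from the indexing input[:, :, indices].

import torch
# assumes the kmeans() function from Example 1 is in scope
points = torch.randn(2, 3, 100)            # 2 batches of 100 three-dimensional points
centroids = kmeans(points, n_clusters=8)   # per-batch centroids, shape (2, 3, 8)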

Example 2: embed_forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def embed_forward(self, embed_x, embed_y, embed_z):
        """embed_x, mask_norm_x: type_specific net output (Anchor)
           embed_y, mask_norm_y: type_specific net output (Negative)
           embed_z, mask_norm_z: type_specific net output (Positive)
           conditions: only x (anchor data) has conditions
        """
        if self.metric_branch is None:
            dist_neg = F.pairwise_distance(embed_x, embed_y, 2)
            dist_pos = F.pairwise_distance(embed_x, embed_z, 2)
        else:
            dist_neg = self.metric_branch(embed_x * embed_y)
            dist_pos = self.metric_branch(embed_x * embed_z)

        target = torch.FloatTensor(dist_neg.size()).fill_(1)
        target = Variable(target.cuda())

        # type specific triplet loss
        loss_type_triplet = self.loss_triplet(dist_neg, dist_pos, target)
        return loss_type_triplet 
Developer: open-mmlab | Project: mmfashion | Source file: triplet_net.py
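
The target tensor of ones suggests that self.loss_triplet is a margin ranking criterion; the sketch below shows that pattern in isolation. The nn.MarginRankingLoss assumption and the margin value are illustrative, not taken from the repository.

import torch
import torch.nn as nn
import torch.nn.functional as F

loss_triplet = nn.MarginRankingLoss(margin=0.2)
anchor, neg, pos = torch.randn(8, 64), torch.randn(8, 64), torch.randn(8, 64)
dist_neg = F.pairwise_distance(anchor, neg, 2)
dist_pos = F.pairwise_distance(anchor, pos, 2)
target = torch.ones_like(dist_neg)               # target = 1 asks for dist_neg > dist_pos
loss = loss_triplet(dist_neg, dist_pos, target)  # penalises triplets violating the margin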

Example 3: text_forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def text_forward(self, x, y, z):
        """ x: Anchor data
            y: Distant (negative) data
            z: Close (positive) data
        """
        desc_x = self.text_branch(x.text)
        desc_y = self.text_branch(y.text)
        desc_z = self.text_branch(z.text)
        distd_p = F.pairwise_distance(desc_y, desc_z, 2)
        distd_n1 = F.pairwise_distance(desc_x, desc_y, 2)
        distd_n2 = F.pairwise_distance(desc_x, desc_z, 2)
        has_text = x.has_text * y.has_text * z.has_text
        loss_sim_t1 = selective_margin_loss(distd_p, distd_n1, self.margin, has_text)
        loss_sim_t2 = selective_margin_loss(distd_p, distd_n2, self.margin, has_text)
        loss_sim_t = (loss_sim_t1 + loss_sim_t2) / 2.
        return loss_sim_t, desc_x, desc_y, desc_z 
Developer: mvasil | Project: fashion-compatibility | Source file: tripletnet.py
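
selective_margin_loss is defined elsewhere in the project; the sketch below is only a plausible reconstruction from how it is called here, i.e. a margin ranking term masked and averaged over the triplets where has_text is set. It should not be read as the repository's actual implementation.

import torch

def selective_margin_loss_sketch(pos_dist, neg_dist, margin, has_sample):
    # hinge term: want neg_dist to exceed pos_dist by at least `margin`
    margin_diff = torch.clamp(pos_dist - neg_dist + margin, min=0)
    # average only over triplets where all items carried text, avoiding division by zero
    num_sample = torch.clamp(has_sample.sum(), min=1)
    return (margin_diff * has_sample).sum() / num_sample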

Example 4: calc_vse_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def calc_vse_loss(self, desc_x, general_x, general_y, general_z, has_text):
        """ Both y and z are assumed to be negatives because they are not from the same 
            item as x

            desc_x: Anchor language embedding
            general_x: Anchor visual embedding
            general_y: Visual embedding from another item from input triplet
            general_z: Visual embedding from another item from input triplet
            has_text: Binary indicator of whether x had a text description
        """
        distd1_p = F.pairwise_distance(general_x, desc_x, 2)
        distd1_n1 = F.pairwise_distance(general_y, desc_x, 2)
        distd1_n2 = F.pairwise_distance(general_z, desc_x, 2)
        loss_vse_1 = selective_margin_loss(distd1_p, distd1_n1, self.margin, has_text)
        loss_vse_2 = selective_margin_loss(distd1_p, distd1_n2, self.margin, has_text)
        return (loss_vse_1 + loss_vse_2) / 2. 
Developer: mvasil | Project: fashion-compatibility | Source file: tripletnet.py

Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def forward(self, anchor, positive, negative):
        assert anchor.size() == positive.size(), "Input sizes between anchor and positive must be equal."
        assert anchor.size() == negative.size(), "Input sizes between anchor and negative must be equal."
        assert positive.size() == negative.size(), "Input sizes between positive and negative must be equal."
        assert anchor.dim() == 2, "Input must be a 2D matrix."

        d_p = F.pairwise_distance(anchor, positive, self.p, self.eps)
        d_n = F.pairwise_distance(anchor, negative, self.p, self.eps)

        if self.swap:
            d_s = F.pairwise_distance(positive, negative, self.p, self.eps)
            d_n = torch.min(d_n, d_s)

        dist = torch.log(1 + torch.exp(d_p - d_n))
        loss = torch.mean(dist)
        return loss 
Developer: almazan | Project: deep-image-retrieval | Source file: loss.py
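
The distance term torch.log(1 + torch.exp(d_p - d_n)) is the softplus function, so the body above can be written functionally as follows. This is only a sketch of the computation with p=2 and illustrative shapes, not the repository's API.

import torch
import torch.nn.functional as F

anchor, positive, negative = torch.randn(16, 256), torch.randn(16, 256), torch.randn(16, 256)
d_p = F.pairwise_distance(anchor, positive, 2)
d_n = F.pairwise_distance(anchor, negative, 2)
loss = F.softplus(d_p - d_n).mean()   # numerically stable form of log(1 + exp(d_p - d_n)).mean()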

Example 6: test_inference_hmm_posterior_importance_sampling

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_hmm_posterior_importance_sampling(self):
        samples = importance_sampling_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.001

        start = time.time()
        posterior = self._model.posterior_results(samples, observe=observation)
        add_importance_sampling_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference.py
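
The distance check used in this and the following tests, in isolation: pairwise_distance yields one distance per hidden state, and .sum() collapses them into a single scalar. The tensors below are made-up stand-ins for the posterior means.

import torch
import torch.nn.functional as F

posterior_mean = torch.tensor([[0.2, 0.8], [0.6, 0.4]])
posterior_mean_correct = torch.tensor([[0.25, 0.75], [0.5, 0.5]])
l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())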

Example 7: test_inference_hmm_posterior_importance_sampling_with_inference_network_ff

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_hmm_posterior_importance_sampling_with_inference_network_ff(self):
        samples = importance_sampling_with_inference_network_ff_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.001

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_ff_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 32} for i in range(len(observation))}, prior_inflation=importance_sampling_with_inference_network_ff_prior_inflation, inference_network=InferenceNetwork.FEEDFORWARD)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_ff_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_ff_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference.py

Example 8: test_inference_hmm_posterior_importance_sampling_with_inference_network_lstm

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_hmm_posterior_importance_sampling_with_inference_network_lstm(self):
        samples = importance_sampling_with_inference_network_ff_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.001

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_lstm_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 32} for i in range(len(observation))}, prior_inflation=importance_sampling_with_inference_network_lstm_prior_inflation, inference_network=InferenceNetwork.LSTM)

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_lstm_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_lstm_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference.py

Example 9: test_inference_hmm_posterior_lightweight_metropolis_hastings

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_hmm_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference.py

Example 10: test_inference_hmm_posterior_random_walk_metropolis_hastings

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference.py

Example 11: test_inference_remote_hmm_posterior_importance_sampling_with_inference_network

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_remote_hmm_posterior_importance_sampling_with_inference_network(self):
        samples = importance_sampling_with_inference_network_samples
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct
        posterior_effective_sample_size_min = samples * 0.03

        self._model.reset_inference_network()
        self._model.learn_inference_network(num_traces=importance_sampling_with_inference_network_training_traces, observe_embeddings={'obs{}'.format(i): {'depth': 2, 'dim': 16} for i in range(len(observation))})

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK, observe=observation)
        add_importance_sampling_with_inference_network_duration(time.time() - start)
        posterior_mean_unweighted = posterior.unweighted().mean
        posterior_mean = posterior.mean
        posterior_effective_sample_size = float(posterior.effective_sample_size)

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'posterior_mean_unweighted', 'posterior_mean', 'posterior_mean_correct', 'posterior_effective_sample_size', 'posterior_effective_sample_size_min', 'l2_distance', 'kl_divergence')
        add_importance_sampling_with_inference_network_kl_divergence(kl_divergence)

        self.assertGreater(posterior_effective_sample_size, posterior_effective_sample_size_min)
        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference_remote.py

Example 12: test_inference_remote_hmm_posterior_lightweight_metropolis_hastings

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_remote_hmm_posterior_lightweight_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_lightweight_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_lightweight_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference_remote.py

Example 13: test_inference_remote_hmm_posterior_random_walk_metropolis_hastings

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def test_inference_remote_hmm_posterior_random_walk_metropolis_hastings(self):
        samples = lightweight_metropolis_hastings_samples
        burn_in = lightweight_metropolis_hastings_burn_in
        observation = {'obs{}'.format(i): self._observation[i] for i in range(len(self._observation))}
        posterior_mean_correct = self._posterior_mean_correct

        start = time.time()
        posterior = self._model.posterior_results(samples, inference_engine=InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS, observe=observation)[burn_in:]
        add_random_walk_metropolis_hastings_duration(time.time() - start)
        posterior_mean = posterior.mean

        l2_distance = float(F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())
        kl_divergence = float(sum([pyprob.distributions.Distribution.kl_divergence(Categorical(i + util._epsilon), Categorical(j + util._epsilon)) for (i, j) in zip(posterior_mean, posterior_mean_correct)]))

        util.eval_print('samples', 'burn_in', 'posterior_mean', 'posterior_mean_correct', 'l2_distance', 'kl_divergence')
        add_random_walk_metropolis_hastings_kl_divergence(kl_divergence)

        self.assertLess(l2_distance, 3)
        self.assertLess(kl_divergence, 1) 
Developer: pyprob | Project: pyprob | Source file: test_inference_remote.py

Example 14: perceptual_features_reconstruction

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def perceptual_features_reconstruction(list_attentions_a, list_attentions_b, factor=1.):
    loss = 0.

    for i, (a, b) in enumerate(zip(list_attentions_a, list_attentions_b)):
        bs, c, w, h = a.shape

        # a of shape (b, c, w, h) to (b, c * w * h)
        a = a.view(bs, -1)
        b = b.view(bs, -1)

        a = F.normalize(a, p=2, dim=-1)
        b = F.normalize(b, p=2, dim=-1)

        layer_loss = (F.pairwise_distance(a, b, p=2)**2) / (c * w * h)
        loss += torch.mean(layer_loss)

    return factor * (loss / len(list_attentions_a)) 
Developer: arthurdouillard | Project: incremental_learning.pytorch | Source file: distillation.py
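
A hypothetical call to the distillation term above, using randomly generated tensors in place of the attention maps from the old and new networks.

import torch
# assumes torch.nn.functional is imported as F and the function from Example 14 is in scope
list_a = [torch.randn(4, 16, 8, 8), torch.randn(4, 32, 4, 4)]   # e.g. features from the old model
list_b = [torch.randn(4, 16, 8, 8), torch.randn(4, 32, 4, 4)]   # matching features from the new model
loss = perceptual_features_reconstruction(list_a, list_b, factor=1.)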

Example 15: base

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pairwise_distance [as alias]
def base(self, x, y, z):
        #base_y = self.basenet(y)
        #if random.random() > .5:  # TODO Debug, make sure order doesn't matter
        #    base_x = self.basenet(x)
        #    base_z = self.basenet(z)
        #else:
        #    base_z = self.basenet(z)
        #    base_x = self.basenet(x)
        base_x = self.basenet(x)
        base_y = self.basenet(y)
        base_z = self.basenet(z)

        if self.distance == 'cosine':
            dist_a = .5 - .5 * F.cosine_similarity(base_x, base_y, 1, 1e-6).view(-1)
            dist_b = .5 - .5 * F.cosine_similarity(base_y, base_z, 1, 1e-6).view(-1)
        elif self.distance == 'l2':
            dist_a = F.pairwise_distance(base_x, base_y, 2).view(-1)
            dist_b = F.pairwise_distance(base_y, base_z, 2).view(-1)
        else:
            assert False, "Wrong args.distance"

        print('fc7 norms:', base_x.norm().item(), base_y.norm().item(), base_z.norm().item())
        print('pairwise dist means:', dist_a.mean().item(), dist_b.mean().item())
        return base_x, base_y, base_z, dist_a, dist_b 
Developer: gsig | Project: PyVideoResearch | Source file: actor_observer_wrapper.py
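
For L2-normalized embeddings the two distance branches above are closely related: ||a - b||^2 = 2 - 2*cos(a, b), so 0.5 - 0.5*cos(a, b) equals the squared Euclidean distance divided by 4. A quick numerical check with illustrative tensors:

import torch
import torch.nn.functional as F

a = F.normalize(torch.randn(8, 128), p=2, dim=1)
b = F.normalize(torch.randn(8, 128), p=2, dim=1)
cos_dist = 0.5 - 0.5 * F.cosine_similarity(a, b, 1, 1e-6)
l2_sq = F.pairwise_distance(a, b, 2) ** 2
print(torch.allclose(cos_dist, l2_sq / 4, atol=1e-4))   # True, up to the small eps inside pairwise_distance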


Note: The torch.nn.functional.pairwise_distance examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project licenses. Please do not republish without permission.