

Python torch.var Method Code Examples

This article collects typical usage examples of the torch.var method in Python. If you are wondering what torch.var does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from the torch module.


The following presents 15 code examples of the torch.var method, sorted by popularity by default.
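
Before the examples, here is a minimal standalone sketch of torch.var itself (the shapes are illustrative): it reduces over the given dimensions, and keepdim=True keeps the reduced dimensions so the result broadcasts back against the input, which is the pattern most of the examples below rely on.

import torch

x = torch.randn(4, 3, 8, 8)  # e.g. a batch of 3-channel feature maps

# variance of the whole tensor (unbiased by default)
print(torch.var(x))

# per-channel variance over batch and spatial dims; keepdim=True gives
# shape (1, 3, 1, 1), which broadcasts against x for normalization
v = torch.var(x, dim=(0, 2, 3), keepdim=True)
print(v.shape)  # torch.Size([1, 3, 1, 1])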

Example 1: evo_norm

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def evo_norm(x, prefix, running_var, v, weight, bias,
             training, momentum, eps=0.1, groups=32):
    if prefix == 'b0':
        # EvoNorm-B0: normalize by the batch variance, tracked as a running estimate
        if training:
            var = torch.var(x, dim=(0, 2, 3), keepdim=True)
            running_var.mul_(momentum)
            running_var.add_((1 - momentum) * var)
        else:
            var = running_var
        if v is not None:
            den = torch.max((var + eps).sqrt(), v * x + instance_std(x, eps))
            x = x / den * weight + bias
        else:
            x = x * weight + bias
    else:
        # EvoNorm-S0: x * sigmoid(v * x), scaled by the per-group std
        if v is not None:
            x = x * torch.sigmoid(v * x) / group_std(x, groups, eps) * weight + bias
        else:
            x = x * weight + bias

    return x
Author: PistonY, Project: torch-toolbox, Lines: 24, Source: functional.py
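
For context, a minimal sketch of calling evo_norm in its EvoNorm-B0 mode; it assumes instance_std from Example 10 is in scope, and the shapes and hyperparameters below are illustrative rather than taken from the original project.

x = torch.randn(8, 32, 16, 16)
running_var = torch.ones(1, 32, 1, 1)  # per-channel running variance buffer
v = torch.ones(1, 32, 1, 1)
weight = torch.ones(1, 32, 1, 1)
bias = torch.zeros(1, 32, 1, 1)
out = evo_norm(x, 'b0', running_var, v, weight, bias,
               training=True, momentum=0.9)
print(out.shape)  # torch.Size([8, 32, 16, 16])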

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, x, y):
        # standardize the per-channel spatial means across the channel dimension
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out 
Author: wgrathwohl, Project: JEM, Lines: 18, Source: norms.py

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, inputs, context=None):
        if inputs.dim() != 2:
            raise ValueError('Expected 2-dim inputs, got inputs of shape: {}'.format(inputs.shape))

        if self.training:
            mean, var = inputs.mean(0), inputs.var(0)
            self.running_mean.mul_(1 - self.momentum).add_(mean * self.momentum)
            self.running_var.mul_(1 - self.momentum).add_(var * self.momentum)
        else:
            mean, var = self.running_mean, self.running_var

        outputs = self.weight * ((inputs - mean) / torch.sqrt((var + self.eps))) + self.bias

        logabsdet_ = torch.log(self.weight) - 0.5 * torch.log(var + self.eps)
        logabsdet = torch.sum(logabsdet_) * torch.ones(inputs.shape[0])

        return outputs, logabsdet 
Author: bayesiains, Project: nsf, Lines: 19, Source: normalization.py
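
Since the transform is an elementwise affine map, its Jacobian is diagonal with entries weight_i / sqrt(var_i + eps) (treating the mean and variance as constants, as in eval mode), which is exactly what the logabsdet line sums. A quick standalone check with made-up numbers:

import torch

w = torch.tensor([2.0, 0.5])
var = torch.tensor([1.0, 4.0])
eps = 1e-5
logabsdet = torch.sum(torch.log(w) - 0.5 * torch.log(var + eps))
# equals log(2 / sqrt(1 + eps)) + log(0.5 / sqrt(4 + eps)) ~= -0.693
print(logabsdet)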

Example 4: unify_sentence

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def unify_sentence(self, sentence_feature, one_sentence_embedding):
        """
            Unify Sentence By Token Importance
        """
        sent_len = one_sentence_embedding.size()[0]

        var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
        for token_index in range(sent_len):
            token_feature = sentence_feature[:, token_index, :]
            sim_map = self.cosine_similarity_torch(token_feature)
            var_token[token_index] = torch.var(sim_map.diagonal(-1))

        var_token = var_token / torch.sum(var_token)
        sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)

        return sentence_embedding 
Author: UKPLab, Project: sentence-transformers, Lines: 18, Source: WKPooling.py
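
The same idea as a standalone sketch with toy shapes: each token is weighted by the variance of the cosine similarities between its representations in consecutive layers. The cosine_sim_matrix helper below is illustrative; the class uses its own cosine_similarity_torch.

import torch

def cosine_sim_matrix(t, eps=1e-8):
    # pairwise cosine similarity between the rows of t
    n = t / t.norm(dim=1, keepdim=True).clamp(min=eps)
    return n @ n.t()

layers, tokens, hidden = 4, 6, 8
sentence_feature = torch.randn(layers, tokens, hidden)  # per-layer token features
one_sentence_embedding = torch.randn(tokens, hidden)    # final token embeddings

# variance of each token's layer-to-layer similarity as its importance weight
w = torch.stack([torch.var(cosine_sim_matrix(sentence_feature[:, i, :]).diagonal(-1))
                 for i in range(tokens)])
w = w / w.sum()
pooled = one_sentence_embedding.t() @ w  # shape: (hidden,)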

Example 5: test_rand

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def test_rand(self):
        """Tests uniform random variable generation on [0, 1)"""
        for size in [(10,), (10, 10), (10, 10, 10)]:
            randvec = crypten.rand(*size)
            self.assertTrue(randvec.size() == size, "Incorrect size")
            tensor = randvec.get_plain_text()
            self.assertTrue(
                (tensor >= 0).all() and (tensor < 1).all(), "Invalid values"
            )

        randvec = crypten.rand(int(1e6)).get_plain_text()
        mean = torch.mean(randvec)
        var = torch.var(randvec)
        self.assertTrue(torch.isclose(mean, torch.Tensor([0.5]), rtol=1e-3, atol=1e-3))
        self.assertTrue(
            torch.isclose(var, torch.Tensor([1.0 / 12]), rtol=1e-3, atol=1e-3)
        ) 
Author: facebookresearch, Project: CrypTen, Lines: 19, Source: test_crypten.py
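
For reference, the same sanity check on a plain torch.rand sample (independent of CrypTen): a uniform distribution on [0, 1) has mean 1/2 and variance 1/12.

import torch

u = torch.rand(1_000_000)
print(torch.mean(u))  # ~0.5
print(torch.var(u))   # ~1/12 ~= 0.0833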

Example 6: _normalize

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def _normalize(self, x, mu, var):
        r"""Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``.

        Args:
            x (torch.Tensor): The Tensor to be normalized.
            mu (torch.Tensor): Mean using which the Tensor is to be normalized.
            var (torch.Tensor): Variance used in the normalization of ``x``.

        Returns:
            Normalized Tensor ``x``.
        """
        std = torch.sqrt(self.eps + var)
        x = (x - mu) / std
        sizes = list(x.size())
        for dim, _ in enumerate(x.size()):
            if dim != 1:
                sizes[dim] = 1  # keep only the channel dim; broadcast over the rest
        scale = self.scale.view(*sizes)
        bias = self.bias.view(*sizes)
        return x * scale + bias 
Author: torchgan, Project: torchgan, Lines: 22, Source: virtualbatchnorm.py

Example 7: init_parameters

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def init_parameters(self, x, init_scale=0.05, eps=1e-8):
        if self.weightnorm:
            # random initial values for the weight-norm parameters
            self.linear._parameters['weight_v'].data.normal_(mean=0, std=init_scale)
            self.linear._parameters['weight_g'].data.fill_(1.)
            self.linear._parameters['bias'].data.fill_(0.)
            init_scale = .01
            # data-dependent init: rescale gain and bias so that the outputs on
            # this batch have mean 0 and std ~= init_scale per unit
            x = self.linear(x)
            m_init, v_init = torch.mean(x, 0), torch.var(x, 0)
            scale_init = init_scale / torch.sqrt(v_init + eps)
            self.linear._parameters['weight_g'].data = self.linear._parameters['weight_g'].data * scale_init.view(
                self.linear._parameters['weight_g'].data.size())
            self.linear._parameters['bias'].data = self.linear._parameters['bias'].data - m_init * scale_init
            self.initialized = True + self.initialized
            return scale_init[None, :] * (x - m_init[None, :])
Author: vlievin, Project: biva-pytorch, Lines: 18, Source: linear.py
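
The same data-dependent initialization as a standalone sketch on a weight-normed nn.Linear; the module and shapes are illustrative, not the author's class. After the rescaling, the layer's outputs on the init batch have mean ~0 and per-unit std ~0.01.

import torch
import torch.nn as nn
from torch.nn.utils import weight_norm

lin = weight_norm(nn.Linear(16, 8))
x = torch.randn(256, 16)

with torch.no_grad():
    lin.bias.zero_()                                 # start from zero bias, as in the example
    y = lin(x)
    m, v = torch.mean(y, 0), torch.var(y, 0)
    scale = 0.01 / torch.sqrt(v + 1e-8)
    lin.weight_g.mul_(scale.view_as(lin.weight_g))   # rescale the per-unit gain
    lin.bias.copy_(-m * scale)                       # shift so outputs are centered
    out = lin(x)
    print(out.mean(0).abs().max(), out.std(0).mean())  # ~0 and ~0.01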

Example 8: test_distance_weighted_miner

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def test_distance_weighted_miner(self):
        embedding_angles = torch.arange(0, 180)
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.randint(low=0, high=2, size=(180,))
        a,_,n = lmu.get_all_triplets_indices(labels)
        all_an_dist = torch.nn.functional.pairwise_distance(embeddings[a], embeddings[n], 2)
        min_an_dist = torch.min(all_an_dist)
        
        for non_zero_cutoff_int in range(5, 15):
            non_zero_cutoff = (float(non_zero_cutoff_int) / 10.) - 0.01
            miner = DistanceWeightedMiner(0, non_zero_cutoff)
            a, p, n = miner(embeddings, labels)
            anchors, positives, negatives = embeddings[a], embeddings[p], embeddings[n]
            an_dist = torch.nn.functional.pairwise_distance(anchors, negatives, 2)
            self.assertTrue(torch.max(an_dist)<=non_zero_cutoff)
            an_dist_var = torch.var(an_dist)
            an_dist_mean = torch.mean(an_dist)
            target_var = ((non_zero_cutoff - min_an_dist)**2) / 12 # variance formula for uniform distribution
            target_mean = (non_zero_cutoff - min_an_dist) / 2
            self.assertTrue(torch.abs(an_dist_var-target_var)/target_var < 0.1)
            self.assertTrue(torch.abs(an_dist_mean-target_mean)/target_mean < 0.1) 
Author: KevinMusgrave, Project: pytorch-metric-learning, Lines: 23, Source: test_distance_weighted_miner.py

Example 9: _value_loss

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def _value_loss(self, obs, returns):
        """
        Computes the value loss on the current batch and returns a dictionary
        of statistics, including the value loss and the explained variance.
        Args:
            obs: batch of observations of shape (batch_size, obs_dim)
            returns: batch of N-step return estimates of shape (batch_size,)
        Returns:
            loss: Variable for the loss
            stats: dictionary of recorded statistics
        """
        values = self.model.forward_critic(obs, self.cells)
        if len(values.size()) == 3:
            values = values.squeeze(2)
        explained_var = 1 - torch.var(returns - values) / torch.var(returns)
        loss = (values - returns).pow(2).mean()

        stats = {
            '_val_loss': loss.item(),
            '_val_explained_var': explained_var.item()
        }
        return loss, stats 
Author: SurrealAI, Project: surreal, Lines: 24, Source: ppo.py
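
Explained variance here is 1 - Var(returns - values) / Var(returns): a value of 1 means the critic predicts the returns perfectly, while 0 means it does no better than a constant baseline. A toy check:

import torch

returns = torch.randn(128)
values = returns + 0.1 * torch.randn(128)  # a good critic leaves a small residual
ev = 1 - torch.var(returns - values) / torch.var(returns)
print(ev)  # close to 1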

Example 10: instance_std

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def instance_std(x, eps=1e-5):
    var = torch.var(x, dim=(2, 3), keepdim=True)
    std = torch.sqrt(var + eps)
    return std 
Author: PistonY, Project: torch-toolbox, Lines: 6, Source: functional.py

Example 11: group_std

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def group_std(x: torch.Tensor, groups=32, eps=1e-5):
    n, c, h, w = x.size()
    x = torch.reshape(x, (n, groups, c // groups, h, w))
    var = torch.var(x, dim=(2, 3, 4), keepdim=True)
    # broadcast the per-group std back over every element of its group;
    # without this expand, the final reshape would fail on a size mismatch
    std = torch.sqrt(var + eps).expand_as(x)
    return torch.reshape(std, (n, c, h, w))
Author: PistonY, Project: torch-toolbox, Lines: 8, Source: functional.py
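
A quick shape check under assumed sizes (the channel count must be divisible by groups):

x = torch.randn(2, 64, 8, 8)
print(group_std(x, groups=32).shape)  # torch.Size([2, 64, 8, 8])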

Example 12: calc_mean_invstddev

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
import sys

def calc_mean_invstddev(feature):
    if len(feature.shape) != 2:
        raise ValueError("We expect the input feature to be a 2-D tensor")
    mean = torch.mean(feature, dim=0)
    var = torch.var(feature, dim=0)
    # avoid division by ~zero
    if (var < sys.float_info.epsilon).any():
        return mean, 1.0 / (torch.sqrt(var) + sys.float_info.epsilon)
    return mean, 1.0 / torch.sqrt(var)
Author: pytorch, Project: audio, Lines: 11, Source: utils.py
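
Typical use, with an assumed (frames x features) tensor: subtract the mean and multiply by the inverse standard deviation to normalize each feature dimension.

import torch

feature = torch.randn(100, 40)  # e.g. 100 frames of 40-dim features
mean, invstddev = calc_mean_invstddev(feature)
normalized = (feature - mean) * invstddev
print(normalized.mean(dim=0).abs().max())  # ~0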

Example 13: forward

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avgpool(x)  # .view(b, c)
        var = torch.var(x, dim=(2, 3)).view(b, c, 1, 1)
        y *= (var + 1e-3).rsqrt()
        # y = torch.cat((y, var), dim=1)
        return self.attention(y).view(b, self.k)


# TODO: keep it to use FP32 always, need to figure out how to set it using apex ? 
Author: soeaver, Project: Parsing-R-CNN, Lines: 12, Source: mixture_batchnorm.py

Example 14: forward

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, x, logpx=None):
        c = x.size(1)

        if not self.initialized:
            with torch.no_grad():
                # compute batch statistics
                x_t = x.transpose(0, 1).contiguous().view(c, -1)
                batch_mean = torch.mean(x_t, dim=1)
                batch_var = torch.var(x_t, dim=1)

                # for numerical issues
                batch_var = torch.max(batch_var, torch.tensor(0.2).to(batch_var))

                self.bias.data.copy_(-batch_mean)
                self.weight.data.copy_(-0.5 * torch.log(batch_var))
                self.initialized.fill_(1)

        bias = self.bias.view(*self.shape).expand_as(x)
        weight = self.weight.view(*self.shape).expand_as(x)

        y = (x + bias) * torch.exp(weight)

        if logpx is None:
            return y
        else:
            return y, logpx - self._logdetgrad(x) 
Author: rtqichen, Project: residual-flows, Lines: 28, Source: act_norm.py

Example 15: _normalize

# Required import: import torch [as alias]
# Or: from torch import var [as alias]
def _normalize(self, x, mean, var):
        """
        Normalize activations.
        :param x: input activations
        :param mean: mean used to normalize
        :param var: var used to normalize
        :return: normalized activations
        """
        return (self.weight.view(1, -1, 1, 1) * (x - mean) / torch.sqrt(var + self.eps)) + self.bias.view(1, -1, 1, 1) 
Author: cambridge-mlg, Project: cnaps, Lines: 11, Source: normalization_layers.py


Note: the torch.var examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects by their respective contributors, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not repost without permission.