

Python torch.rsqrt Method Code Examples

This article collects and summarizes typical usage examples of the Python method torch.rsqrt. If you are wrestling with questions such as: what exactly does torch.rsqrt do, how is it used, and what do real-world examples look like? Then the curated code examples below may help. You can also explore further usage examples from the torch module this method belongs to.


The following presents 15 code examples of torch.rsqrt, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
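Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what torch.rsqrt computes: the element-wise reciprocal square root 1/sqrt(x), a fused and typically faster alternative to dividing by torch.sqrt.

import torch

x = torch.tensor([1.0, 4.0, 16.0])
print(torch.rsqrt(x))       # tensor([1.0000, 0.5000, 0.2500])
print(1.0 / torch.sqrt(x))  # same values, computed in two steps

# The recurring pattern in the examples below: a numerically safe
# inverse standard deviation for normalization layers.
var = torch.tensor([0.25, 1.0])
inv_std = torch.rsqrt(var + 1e-5)  # approximately [2.0, 1.0]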

Example 1: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, input):
        if get_world_size() == 1 or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])

        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())

        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return input * scale + bias 
Developer: soeaver | Project: Parsing-R-CNN | Lines: 25 | Source: batch_norm.py
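Aside: the var = meansqr - mean * mean step above relies on the identity

\operatorname{Var}[x] = \mathbb{E}[x^2] - (\mathbb{E}[x])^2

which is why each worker only needs to all-reduce two per-channel moments (the mean and the mean of squares) in a single communication round.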

Example 2: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, x):
        inv_var = torch.rsqrt(self.running_var + self.eps)
        if self.affine:
            alpha = self.weight * inv_var
            beta = self.bias - self.running_mean * alpha
        else:
            alpha = inv_var
            beta = - self.running_mean * alpha

        x.mul_(alpha.view(self._broadcast_shape(x)))
        x.add_(beta.view(self._broadcast_shape(x)))

        if self.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
        elif self.activation == "elu":
            return functional.elu(x, alpha=self.activation_param, inplace=True)
        elif self.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.activation)) 
Developer: mapillary | Project: seamseg | Lines: 24 | Source: misc.py

Example 3: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, z):
        if z.size(-1) == 1:
            return z

        mu = torch.mean(z, keepdim=True, dim=-1)
        sigma = torch.std(z, keepdim=True, dim=-1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        if self.affine:
            ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        # NOTE(nikita): the t2t code does the following instead, with eps=1e-6
        # However, I currently have no reason to believe that this difference in
        # implementation matters.
        # mu = torch.mean(z, keepdim=True, dim=-1)
        # variance = torch.mean((z - mu.expand_as(z))**2, keepdim=True, dim=-1)
        # ln_out = (z - mu.expand_as(z)) * torch.rsqrt(variance + self.eps).expand_as(z)
        # ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        return ln_out

# %% 
Developer: nikitakit | Project: self-attentive-parser | Lines: 23 | Source: parse_nk.py

Example 4: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, x):
        """
        Input Variables:
        ----------------
            x: Input tensor of shape [NxCxHxW]
        """

        n, c, h, w = x.shape
        assert (self.gamma.shape[1], self.beta.shape[1], self.tau.shape[1]) == (c, c, c)

        # Compute the mean norm of activations per channel
        nu2 = torch.mean(x.pow(2), (2, 3), keepdim=True)
        # Perform FRN
        x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
        # Return after applying the Offset-ReLU non-linearity
        return torch.max(self.gamma*x + self.beta, self.tau) 
Developer: openseg-group | Project: openseg.pytorch | Lines: 18 | Source: frn.py

Example 5: __p_k

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def __p_k(self, x, mu, var):
        """
        Returns a tensor with dimensions (n, k, 1) indicating the likelihood of data belonging to the k-th Gaussian.
        args:
            x:      torch.Tensor (n, k, d)
            mu:     torch.Tensor (1, k, d)
            var:    torch.Tensor (1, k, d)
        returns:
            p_k:    torch.Tensor (n, k, 1)
        """

        # (1, k, d) --> (n, k, d)
        mu = mu.expand(x.size(0), self.n_components, self.n_features)
        var = var.expand(x.size(0), self.n_components, self.n_features)

        # (n, k, d) --> (n, k, 1)
        exponent = torch.exp(-.5 * torch.sum((x - mu) * (x - mu) / var, 2, keepdim=True))
        # (n, k, d) --> (n, k, 1)
        prefactor = torch.rsqrt(((2. * pi) ** self.n_features) * torch.prod(var, dim=2, keepdim=True) + self.eps)

        return prefactor * exponent 
Developer: kumar-shridhar | Project: PyTorch-BayesianCNN | Lines: 23 | Source: gmm.py
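For reference, the quantity assembled in __p_k is the density of a d-dimensional Gaussian with diagonal covariance, written here as a sketch in the docstring's notation (with d = n_features; the formula itself is not part of the original source):

p_k(x) = \left[(2\pi)^d \prod_{i=1}^{d} \sigma_i^2 \right]^{-1/2} \exp\left(-\frac{1}{2} \sum_{i=1}^{d} \frac{(x_i - \mu_i)^2}{\sigma_i^2}\right)

torch.rsqrt computes the inverse square root of the bracketed prefactor in a single call, with self.eps guarding against a vanishing variance product.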

Example 6: fused_bn

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5):
  # Apply scale and shift--if gain and bias are provided, fuse them here
  # Prepare scale
  scale = torch.rsqrt(var + eps)
  # If a gain is provided, use it
  if gain is not None:
    scale = scale * gain
  # Prepare shift
  shift = mean * scale
  # If bias is provided, use it
  if bias is not None:
    shift = shift - bias
  return x * scale - shift
  #return ((x - mean) / ((var + eps) ** 0.5)) * gain + bias # The unfused way.


# Manual BN
# Calculate means and variances using mean-of-squares minus mean-squared 
Developer: ajbrock | Project: BigGAN-PyTorch | Lines: 20 | Source: layers.py
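To see why fused_bn can return x * scale - shift, expand the standard normalization transform using the code's own names (a short derivation, not part of the original source):

\gamma \, \frac{x - \mu}{\sqrt{\sigma^2 + \varepsilon}} + \beta = x \cdot s - t, \qquad s = \gamma \, (\sigma^2 + \varepsilon)^{-1/2}, \quad t = \mu s - \beta

so scale = torch.rsqrt(var + eps) * gain and shift = mean * scale - bias reproduce the unfused expression in the trailing comment, while touching the full tensor x with only two element-wise operations instead of four.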

Example 7: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, input, noise):
        # assumes input.dim() == 4, TODO: generalize that.

        shift = self.shift_conv.forward(noise)
        scale = self.scale_conv.forward(noise)
        size = input.size()
        x_reshaped = input.view(size[0], size[1], size[2]*size[3])
        mean = x_reshaped.mean(2, keepdim=True)
        var = x_reshaped.var(2, keepdim=True)
        std = torch.rsqrt(var + self.eps)  # inverse standard deviation, despite the name
        norm_features = ((x_reshaped - mean) * std).view(*size)
        output = norm_features * scale + shift
        return output


######################################################################
# A modified resnet block which allows for passing additional noise input
# to be used for conditional instance norm
###################################################################### 
Developer: aalmah | Project: augmented_cyclegan | Lines: 21 | Source: modules.py

Example 8: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, x):
        """
        0, 1, 2, 3 -> (B, H, W, C) in TensorFlow
        0, 1, 2, 3 -> (B, C, H, W) in PyTorch
        TensorFlow code
            nu2 = tf.reduce_mean(tf.square(x), axis=[1, 2], keepdims=True)
            x = x * tf.rsqrt(nu2 + tf.abs(eps))
            # This code includes the TLU function max(y, tau)
            return tf.maximum(gamma * x + beta, tau)
        """
        # Compute the mean norm of activations per channel.
        nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)

        # Perform FRN.
        x = x * torch.rsqrt(nu2 + self.eps.abs())

        # Scale and Bias
        x = self.weight.view(1, self.num_features, 1, 1) * x + self.bias.view(1, self.num_features, 1, 1)
        # x = self.weight * x + self.bias
        return x 
Developer: JDAI-CV | Project: fast-reid | Lines: 22 | Source: frn.py

Example 9: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, x):
        h = x.unsqueeze(2).unsqueeze(3)
        if self.normalize_latents:
            mean = torch.mean(h * h, 1, keepdim=True)
            dom = torch.rsqrt(mean + self.eps)
            h = h * dom
        h = self.block0(h, self.depth == 0)
        if self.depth > 0:
            for i in range(self.depth - 1):
                h = F.upsample(h, scale_factor=2)
                h = self.blocks[i](h)
            h = F.upsample(h, scale_factor=2)
            ult = self.blocks[self.depth - 1](h, True)
            if self.alpha < 1.0:
                if self.depth > 1:
                    preult_rgb = self.blocks[self.depth - 2].toRGB(h)
                else:
                    preult_rgb = self.block0.toRGB(h)
            else:
                preult_rgb = 0
            h = preult_rgb * (1-self.alpha) + ult * self.alpha
        return h 
Developer: deepsound-project | Project: pggan-pytorch | Lines: 24 | Source: network.py

Example 10: comp_simi

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def comp_simi(inputs):
	"""Compute Similarity
	"""
	values, indices = torch.max(inputs, 1)
	thres = 0.9
	weights = values.ge(thres)
	weights = weights.type(torch.cuda.FloatTensor)
	[batch_size, dim] = inputs.shape
	indices = torch.unsqueeze(indices.cpu(), 1)
	one_hot_labels = torch.zeros(batch_size, dim).scatter_(1, indices, 1)
	one_hot_labels = one_hot_labels.cuda()
	inputs2 = torch.mul(inputs, inputs)
	norm2 = torch.sum(inputs2, 1)
	root_inv = torch.rsqrt(norm2)
	tmp_var1 = root_inv.expand(dim,batch_size)
	tmp_var2 = torch.t(tmp_var1)
	nml_inputs = torch.mul(inputs, tmp_var2)
	similarity = torch.matmul(nml_inputs, torch.t(nml_inputs))			
	similarity2 = similarity - torch.eye(batch_size).cuda()  # zeroes the diagonal, but is not returned
	return similarity, one_hot_labels, weights 
Developer: Cory-M | Project: DCCM | Lines: 22 | Source: functions.py

Example 11: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, x):
        if x.requires_grad:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            bias = self.bias - self.running_mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provides more optimization opportunities.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps,
            ) 
Developer: facebookresearch | Project: detectron2 | Lines: 23 | Source: batch_norm.py

Example 12: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, input):
        if comm.get_world_size() == 1 or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])

        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())

        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return input * scale + bias 
Developer: conansherry | Project: detectron2 | Lines: 25 | Source: batch_norm.py

Example 13: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, input):
        if du.get_local_size() == 1 or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2, 3, 4])
        meansqr = torch.mean(input * input, dim=[0, 2, 3, 4])

        vec = torch.cat([mean, meansqr], dim=0)
        vec = GroupGather.apply(vec, self.num_sync_devices, self.num_groups) * (
            1.0 / self.num_sync_devices
        )

        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1, 1)
        return input * scale + bias 
Developer: facebookresearch | Project: SlowFast | Lines: 27 | Source: batchnorm_helper.py

Example 14: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, x, y):

        # x: N x C x W x H
        batchSize, nChannel, width, height = x.size()
        tmpX = x.view(batchSize, nChannel, -1)
        mux = tmpX.mean(dim=2).view(batchSize, nChannel, 1, 1)
        varx = torch.clamp((tmpX*tmpX).mean(dim=2).view(batchSize, nChannel, 1, 1) - mux*mux, min=0)
        varx = torch.rsqrt(varx + self.epsilon)
        x = (x - mux) * varx

        # Adapt style
        styleY = self.styleModulator(y)
        yA = styleY[:, : self.dimOut].view(batchSize, self.dimOut, 1, 1)
        yB = styleY[:, self.dimOut:].view(batchSize, self.dimOut, 1, 1)

        return yA * x + yB 
Developer: facebookresearch | Project: pytorch_GAN_zoo | Lines: 18 | Source: styleGAN.py

Example 15: forward

# Required module: import torch [as alias]
# Or alternatively: from torch import rsqrt [as alias]
def forward(self, input):
        if comm.get_world_size() == 1 or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, "SyncBatchNorm does not support empty input"
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])

        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())

        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return input * scale + bias 
Developer: poodarchu | Project: Det3D | Lines: 25 | Source: norm.py


Note: The torch.rsqrt method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not reproduce this article without permission.