This article collects typical usage examples of the torch.var method in Python. Wondering what torch.var does, how to use it, or looking for worked examples? The hand-picked code samples below may help. You can also explore further usage examples of torch, the module this method belongs to.
The following shows 15 code examples of torch.var, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
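Before the examples, a quick refresher on the method itself: torch.var computes the variance, by default with Bessel's correction (unbiased=True), optionally over selected dimensions. A minimal sketch:

import torch

x = torch.randn(4, 3, 8, 8)
v_all = torch.var(x)                           # scalar: variance over all elements
v_hw = torch.var(x, dim=(2, 3), keepdim=True)  # shape (4, 3, 1, 1): per sample and channel
v_pop = torch.var(x, dim=0, unbiased=False)    # population variance (divide by N, not N - 1)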
Example 1: evo_norm
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def evo_norm(x, prefix, running_var, v, weight, bias,
             training, momentum, eps=0.1, groups=32):
    if prefix == 'b0':
        if training:
            var = torch.var(x, dim=(0, 2, 3), keepdim=True)
            running_var.mul_(momentum)
            running_var.add_((1 - momentum) * var)
        else:
            var = running_var

        if v is not None:
            den = torch.max((var + eps).sqrt(), v * x + instance_std(x, eps))
            x = x / den * weight + bias
        else:
            x = x * weight + bias
    else:
        if v is not None:
            x = x * torch.sigmoid(v * x) / group_std(x, groups, eps) * weight + bias
        else:
            x = x * weight + bias
    return x
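A minimal usage sketch for Example 1 (all shapes here are illustrative assumptions; instance_std and group_std are the helpers shown later in Examples 10 and 11):

# Sketch only; the shapes below are assumptions.
x = torch.randn(8, 32, 16, 16)
weight = torch.ones(1, 32, 1, 1)
bias = torch.zeros(1, 32, 1, 1)
running_var = torch.ones(1, 32, 1, 1)
v = torch.ones(1, 32, 1, 1)
out = evo_norm(x, 'b0', running_var, v, weight, bias,
               training=True, momentum=0.9)
print(out.shape)  # torch.Size([8, 32, 16, 16])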
Example 2: forward
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, x, y):
    means = torch.mean(x, dim=(2, 3))
    m = torch.mean(means, dim=-1, keepdim=True)
    v = torch.var(means, dim=-1, keepdim=True)
    means = (means - m) / (torch.sqrt(v + 1e-5))
    h = self.instance_norm(x)

    if self.bias:
        gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
        h = h + means[..., None, None] * alpha[..., None, None]
        out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
    else:
        gamma, alpha = self.embed(y).chunk(2, dim=-1)
        h = h + means[..., None, None] * alpha[..., None, None]
        out = gamma.view(-1, self.num_features, 1, 1) * h
    return out
Example 3: forward
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, inputs, context=None):
    if inputs.dim() != 2:
        raise ValueError('Expected 2-dim inputs, got inputs of shape: {}'.format(inputs.shape))

    if self.training:
        mean, var = inputs.mean(0), inputs.var(0)
        self.running_mean.mul_(1 - self.momentum).add_(mean * self.momentum)
        self.running_var.mul_(1 - self.momentum).add_(var * self.momentum)
    else:
        mean, var = self.running_mean, self.running_var

    outputs = self.weight * ((inputs - mean) / torch.sqrt((var + self.eps))) + self.bias

    # Elementwise affine map, so log|det J| = sum(log(weight) - 0.5 * log(var + eps)),
    # identical for every example in the batch.
    logabsdet_ = torch.log(self.weight) - 0.5 * torch.log(var + self.eps)
    logabsdet = torch.sum(logabsdet_) * torch.ones(inputs.shape[0])

    return outputs, logabsdet
Example 4: unify_sentence
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def unify_sentence(self, sentence_feature, one_sentence_embedding):
    """
    Unify a sentence by token importance.
    """
    sent_len = one_sentence_embedding.size()[0]
    var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
    for token_index in range(sent_len):
        token_feature = sentence_feature[:, token_index, :]
        sim_map = self.cosine_similarity_torch(token_feature)
        var_token[token_index] = torch.var(sim_map.diagonal(-1))
    var_token = var_token / torch.sum(var_token)
    sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)
    return sentence_embedding
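The helper cosine_similarity_torch is not shown in this snippet; a plausible stand-in (an assumption, not the repository's actual definition) builds the pairwise cosine-similarity matrix across the per-layer features of one token, so that sim_map.diagonal(-1) reads off adjacent-layer similarities:

# Hypothetical helper, not from the source repository.
def cosine_similarity_torch(features, eps=1e-8):
    # features: (num_layers, hidden_dim) for a single token
    normed = features / (features.norm(dim=-1, keepdim=True) + eps)
    return normed @ normed.t()  # (num_layers, num_layers)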
Example 5: test_rand
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def test_rand(self):
    """Tests uniform random variable generation on [0, 1)"""
    for size in [(10,), (10, 10), (10, 10, 10)]:
        randvec = crypten.rand(*size)
        self.assertTrue(randvec.size() == size, "Incorrect size")
        tensor = randvec.get_plain_text()
        self.assertTrue(
            (tensor >= 0).all() and (tensor < 1).all(), "Invalid values"
        )

    # A U(0, 1) distribution has mean 1/2 and variance 1/12.
    randvec = crypten.rand(int(1e6)).get_plain_text()
    mean = torch.mean(randvec)
    var = torch.var(randvec)
    self.assertTrue(torch.isclose(mean, torch.Tensor([0.5]), rtol=1e-3, atol=1e-3))
    self.assertTrue(
        torch.isclose(var, torch.Tensor([1.0 / 12]), rtol=1e-3, atol=1e-3)
    )
Example 6: _normalize
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def _normalize(self, x, mu, var):
    r"""Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``.

    Args:
        x (torch.Tensor): The Tensor to be normalized.
        mu (torch.Tensor): Mean used to normalize the Tensor.
        var (torch.Tensor): Variance used in the normalization of ``x``.

    Returns:
        Normalized Tensor ``x``.
    """
    std = torch.sqrt(self.eps + var)
    x = (x - mu) / std
    # Reshape scale/bias so they broadcast over every dimension except channels.
    sizes = list(x.size())
    for dim, _ in enumerate(sizes):
        if dim != 1:
            sizes[dim] = 1
    scale = self.scale.view(*sizes)
    bias = self.bias.view(*sizes)
    return x * scale + bias
Example 7: init_parameters
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def init_parameters(self, x, init_scale=0.05, eps=1e-8):
    if self.weightnorm:
        # initial values
        self.linear._parameters['weight_v'].data.normal_(mean=0, std=init_scale)
        self.linear._parameters['weight_g'].data.fill_(1.)
        self.linear._parameters['bias'].data.fill_(0.)
        init_scale = .01

        # data-dependent init: rescale so the output has zero mean and
        # variance init_scale**2 per unit
        x = self.linear(x)
        m_init, v_init = torch.mean(x, 0), torch.var(x, 0)
        scale_init = init_scale / torch.sqrt(v_init + eps)
        self.linear._parameters['weight_g'].data = self.linear._parameters['weight_g'].data * scale_init.view(
            self.linear._parameters['weight_g'].data.size())
        self.linear._parameters['bias'].data = self.linear._parameters['bias'].data - m_init * scale_init
        self.initialized = True + self.initialized
        return scale_init[None, :] * (x - m_init[None, :])
Example 8: test_distance_weighted_miner
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def test_distance_weighted_miner(self):
    embedding_angles = torch.arange(0, 180)
    embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles],
                              requires_grad=True, dtype=torch.float)  # 2D embeddings
    labels = torch.randint(low=0, high=2, size=(180,))
    a, _, n = lmu.get_all_triplets_indices(labels)
    all_an_dist = torch.nn.functional.pairwise_distance(embeddings[a], embeddings[n], 2)
    min_an_dist = torch.min(all_an_dist)
    for non_zero_cutoff_int in range(5, 15):
        non_zero_cutoff = (float(non_zero_cutoff_int) / 10.) - 0.01
        miner = DistanceWeightedMiner(0, non_zero_cutoff)
        a, p, n = miner(embeddings, labels)
        anchors, positives, negatives = embeddings[a], embeddings[p], embeddings[n]
        an_dist = torch.nn.functional.pairwise_distance(anchors, negatives, 2)
        self.assertTrue(torch.max(an_dist) <= non_zero_cutoff)
        an_dist_var = torch.var(an_dist)
        an_dist_mean = torch.mean(an_dist)
        target_var = ((non_zero_cutoff - min_an_dist) ** 2) / 12  # variance formula for uniform distribution
        target_mean = (non_zero_cutoff - min_an_dist) / 2
        self.assertTrue(torch.abs(an_dist_var - target_var) / target_var < 0.1)
        self.assertTrue(torch.abs(an_dist_mean - target_mean) / target_mean < 0.1)
Example 9: _value_loss
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def _value_loss(self, obs, returns):
    """
    Computes the value loss on the current batch, along with a dictionary
    of statistics that includes the value loss and the explained variance.

    Args:
        obs: batch of observations, shape (batch_size, obs_dim)
        returns: batch of N-step return estimates, shape (batch_size,)

    Returns:
        loss (surreal.utils.pytorch.GPUVariable): Variable for the loss
        stats (dict): dictionary of recorded statistics
    """
    values = self.model.forward_critic(obs, self.cells)
    if len(values.size()) == 3:
        values = values.squeeze(2)
    # explained variance: 1 - Var(returns - values) / Var(returns)
    explained_var = 1 - torch.var(returns - values) / torch.var(returns)
    loss = (values - returns).pow(2).mean()
    stats = {
        '_val_loss': loss.item(),
        '_val_explained_var': explained_var.item()
    }
    return loss, stats
Example 10: instance_std
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def instance_std(x, eps=1e-5):
    var = torch.var(x, dim=(2, 3), keepdim=True)
    std = torch.sqrt(var + eps)
    return std
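A quick shape check (illustrative sketch):

x = torch.randn(4, 8, 16, 16)
print(instance_std(x).shape)  # torch.Size([4, 8, 1, 1]): one std per sample and channel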
Example 11: group_std
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def group_std(x: torch.Tensor, groups=32, eps=1e-5):
    n, c, h, w = x.size()
    x = torch.reshape(x, (n, groups, c // groups, h, w))
    var = torch.var(x, dim=(2, 3, 4), keepdim=True)
    # Expand the per-group std back to the grouped shape before restoring
    # (n, c, h, w); without the expand, the reshape fails on an
    # element-count mismatch.
    std = torch.sqrt(var + eps).expand_as(x)
    return torch.reshape(std, (n, c, h, w))
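With the expand in place, the per-group std broadcasts to every element (illustrative sketch):

x = torch.randn(2, 32, 8, 8)
print(group_std(x, groups=8).shape)  # torch.Size([2, 32, 8, 8])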
Example 12: calc_mean_invstddev
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
import sys  # needed for sys.float_info.epsilon


def calc_mean_invstddev(feature):
    if len(feature.shape) != 2:
        raise ValueError("We expect the input feature to be a 2-D tensor")
    mean = torch.mean(feature, dim=0)
    var = torch.var(feature, dim=0)
    # avoid division by ~zero
    if (var < sys.float_info.epsilon).any():
        return mean, 1.0 / (torch.sqrt(var) + sys.float_info.epsilon)
    return mean, 1.0 / torch.sqrt(var)
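Typical use is per-dimension mean/variance normalization of a feature matrix (illustrative sketch):

feature = torch.randn(100, 40)  # (frames, feature_dim)
mean, invstddev = calc_mean_invstddev(feature)
normalized = (feature - mean) * invstddev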
Example 13: forward
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, x):
    b, c, _, _ = x.size()
    y = self.avgpool(x)  # .view(b, c)
    var = torch.var(x, dim=(2, 3)).view(b, c, 1, 1)
    y *= (var + 1e-3).rsqrt()
    # y = torch.cat((y, var), dim=1)
    return self.attention(y).view(b, self.k)
    # TODO: keep it to use FP32 always, need to figure out how to set it using apex?
Example 14: forward
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def forward(self, x, logpx=None):
    c = x.size(1)
    if not self.initialized:
        with torch.no_grad():
            # compute batch statistics
            x_t = x.transpose(0, 1).contiguous().view(c, -1)
            batch_mean = torch.mean(x_t, dim=1)
            batch_var = torch.var(x_t, dim=1)

            # for numerical issues
            batch_var = torch.max(batch_var, torch.tensor(0.2).to(batch_var))

            self.bias.data.copy_(-batch_mean)
            self.weight.data.copy_(-0.5 * torch.log(batch_var))
            self.initialized.fill_(1)

    bias = self.bias.view(*self.shape).expand_as(x)
    weight = self.weight.view(*self.shape).expand_as(x)

    y = (x + bias) * torch.exp(weight)

    if logpx is None:
        return y
    else:
        return y, logpx - self._logdetgrad(x)
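The companion _logdetgrad is not shown in this snippet. For the elementwise affine map y = (x + bias) * exp(weight), log|det(dy/dx)| is the sum of weight over all non-batch elements; a sketch consistent with the forward pass above (an assumption, not the repository's code):

# Hypothetical sketch, inferred from the forward pass above.
def _logdetgrad(self, x):
    # one scalar per batch row: the sum of the log-scales
    return self.weight.view(*self.shape).expand_as(x).contiguous().view(
        x.size(0), -1).sum(dim=1, keepdim=True)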
Example 15: _normalize
# Required module: import torch [as alias]
# Or: from torch import var [as alias]
def _normalize(self, x, mean, var):
    """
    Normalize activations.

    :param x: input activations
    :param mean: mean used to normalize
    :param var: variance used to normalize
    :return: normalized activations
    """
    return (self.weight.view(1, -1, 1, 1) * (x - mean) / torch.sqrt(var + self.eps)
            + self.bias.view(1, -1, 1, 1))