This article collects typical usage examples of the torch.abs function in Python. If you have been wondering what torch.abs does, how to call it, or what real-world usage looks like, the curated code examples here may help.
Fifteen code examples of torch.abs are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
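Before the examples, a minimal, self-contained sketch of what torch.abs itself does (the tensor values are made up for illustration):

import torch

x = torch.tensor([-1.5, 0.0, 2.0])
print(torch.abs(x))   # tensor([1.5000, 0.0000, 2.0000]), element-wise absolute value
print(x.abs())        # method form, same result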
Example 1: kurtosis_score
def kurtosis_score(x, dim=0):
'''Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Adapted from `scipy.stats.kurtosistest`.
Args:
    x: Tensor of sample data.
    dim: Dimension along which to compute the test. Default is 0. If None,
        compute over the whole tensor `x`.
Returns:
    statistic: The computed z-score for this test.
    p-value: The 2-sided p-value for the hypothesis test.
'''
x, n, dim = _x_n_dim(x, dim)
if n < 20:
raise ValueError(
"Number of elements has to be >= 20 to compute kurtosis")
b2 = (x**4).mean(dim) / (x**2).mean(dim)**2
E = 3.0 * (n - 1) / (n + 1)
varb2 = 24.0 * n * (n - 2) * (n - 3) / ((n + 1)**2 * (n + 3) * (n + 5))
x = (b2 - E) / math.sqrt(varb2)
sqrtbeta1 = 6.0 * (n * n - 5 * n + 2) / ((n + 7) * (n + 9)) *\
math.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))
A = 6.0 + 8.0 / sqrtbeta1 * \
(2.0 / sqrtbeta1 + math.sqrt(1 + 4.0 / (sqrtbeta1**2)))
term1 = 1 - 2 / (9.0 * A)
denom = 1 + x * math.sqrt(2 / (A - 4.0))
term2 = torch.sign(denom) * torch.pow((1 - 2.0 / A) /
torch.abs(denom), 1 / 3.0)
Z = (term1 - term2) / math.sqrt(2 / (9.0 * A))
return Z, 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))
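As a standalone illustration of the final step above: the two-sided p-value is erfc(|Z|/sqrt(2)), written here as 1 + erf(-|Z|/sqrt(2)), which is where torch.abs enters. A minimal sketch with made-up z-scores:

import math
import torch

Z = torch.tensor([-2.5, 0.3, 1.96])                 # hypothetical z-scores
p = 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))   # two-sided tail probability
print(p)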
Example 2: forward
def forward(self, agent_qs, states):
"""Forward pass for the mixer.
Arguments:
agent_qs: Tensor of shape [B, T, n_agents]
states: Tensor of shape [B, T, state_dim]
"""
bs = agent_qs.size(0)
states = states.reshape(-1, self.state_dim)
agent_qs = agent_qs.view(-1, 1, self.n_agents)
# First layer
w1 = th.abs(self.hyper_w_1(states))
b1 = self.hyper_b_1(states)
w1 = w1.view(-1, self.n_agents, self.embed_dim)
b1 = b1.view(-1, 1, self.embed_dim)
hidden = F.elu(th.bmm(agent_qs, w1) + b1)
# Second layer
w_final = th.abs(self.hyper_w_final(states))
w_final = w_final.view(-1, self.embed_dim, 1)
# State-dependent bias
v = self.V(states).view(-1, 1, 1)
# Compute final output
y = th.bmm(hidden, w_final) + v
# Reshape and return
q_tot = y.view(bs, -1, 1)
return q_tot
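The th.abs calls above are what keep the mixing weights non-negative (a QMIX-style monotonicity constraint). A minimal sketch of just that step, with made-up sizes and a hypothetical hypernetwork:

import torch as th
import torch.nn as nn

state_dim, n_agents, embed_dim = 8, 3, 16                # illustrative sizes
hyper_w_1 = nn.Linear(state_dim, n_agents * embed_dim)   # hypothetical hypernetwork
states = th.randn(4, state_dim)
w1 = th.abs(hyper_w_1(states)).view(-1, n_agents, embed_dim)
assert (w1 >= 0).all()   # non-negative weights keep q_tot monotone in each agent's Q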
Example 3: __call__
def __call__(self, module, features):
statistics = self.get_statistics(features)
self.statistics = statistics
if self.mode == 'store':
self.stored[module] = statistics.detach()
elif self.mode == 'match':
if statistics.ndimension() == 2:
if self.method == 'maximize':
self.losses[module] = - statistics[0, self.map_index]
else:
self.losses[module] = torch.abs(300 - statistics[0, self.map_index])
else:
ws = self.window_size
t = statistics.detach() * 0
s_cc = statistics[:1, :, t.shape[2] // 2 - ws:t.shape[2] // 2 + ws, t.shape[3] // 2 - ws:t.shape[3] // 2 + ws] #* 1.0
t_cc = t[:1, :, t.shape[2] // 2 - ws:t.shape[2] // 2 + ws, t.shape[3] // 2 - ws:t.shape[3] // 2 + ws] #* 1.0
t_cc[:, self.map_index,...] = 1
if self.method == 'maximize':
self.losses[module] = -(s_cc * t_cc.contiguous()).sum()
else:
self.losses[module] = torch.abs(200 -(s_cc * t_cc.contiguous())).sum()
Example 4: test_MultivariateNormalQMCEngineDegenerate
def test_MultivariateNormalQMCEngineDegenerate(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
mean = torch.zeros(3, device=device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=2000)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.abs(torch.std(samples[:, 0]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 1]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 2]) - math.sqrt(2)) < 1e-2)
for i in (0, 1, 2):
_, pval = shapiro(samples[:, i].cpu().numpy())
self.assertGreater(pval, 0.9)
cov = np.cov(samples.cpu().numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
self.assertLess(np.abs(cov[0, 2] - 1), 1e-2)
# check to see if X + Y = Z almost exactly
self.assertTrue(
torch.all(
torch.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) < 1e-5
)
)
Example 5: simplax
def simplax(surrogate, x, logits, mixtureweights, k=1):
B = logits.shape[0]
probs = torch.softmax(logits, dim=1)
cat = RelaxedOneHotCategorical(probs=probs, temperature=torch.tensor([1.]).cuda())
outputs = {}
net_loss = 0
surr_loss = 0
for jj in range(k):
cluster_S = cat.rsample()
cluster_H = H(cluster_S)
logq = cat.log_prob(cluster_S.detach()).view(B,1)
logpx_given_z = logprob_undercomponent(x, component=cluster_H)
logpz = torch.log(mixtureweights[cluster_H]).view(B,1)
logpxz = logpx_given_z + logpz #[B,1]
f = logpxz - logq - 1.
surr_input = torch.cat([cluster_S, x, logits], dim=1) #[B,21]
surr_pred = surrogate.net(surr_input)
net_loss += - torch.mean((f.detach() - surr_pred.detach()) * logq + surr_pred)
# surr_loss += torch.mean(torch.abs(f.detach()-1.-surr_pred))
# grad_logq = torch.mean( torch.autograd.grad([torch.mean(logq)], [logits], create_graph=True, retain_graph=True)[0], dim=1, keepdim=True)
# grad_surr = torch.mean( torch.autograd.grad([torch.mean(surr_pred)], [logits], create_graph=True, retain_graph=True)[0], dim=1, keepdim=True)
grad_logq = torch.autograd.grad([torch.mean(logq)], [logits], create_graph=True, retain_graph=True)[0]
grad_surr = torch.autograd.grad([torch.mean(surr_pred)], [logits], create_graph=True, retain_graph=True)[0]
surr_loss = torch.mean(((f.detach() - surr_pred) * grad_logq + grad_surr)**2)
surr_dif = torch.mean(torch.abs(f.detach() - surr_pred))
# surr_loss = torch.mean(torch.abs(f.detach() - surr_pred))
grad_path = torch.autograd.grad([torch.mean(surr_pred)], [logits], create_graph=True, retain_graph=True)[0]
grad_score = torch.autograd.grad([torch.mean((f.detach() - surr_pred.detach()) * logq)], [logits], create_graph=True, retain_graph=True)[0]
grad_path = torch.mean(torch.abs(grad_path))
grad_score = torch.mean(torch.abs(grad_score))
net_loss = net_loss / k
surr_loss = surr_loss / k
outputs['net_loss'] = net_loss
outputs['f'] = f
outputs['logpx_given_z'] = logpx_given_z
outputs['logpz'] = logpz
outputs['logq'] = logq
outputs['surr_loss'] = surr_loss
outputs['surr_dif'] = surr_dif
outputs['grad_path'] = grad_path
outputs['grad_score'] = grad_score
return outputs #net_loss, f, logpx_given_z, logpz, logq, surr_loss, surr_dif, grad_path, grad_score
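The surr_dif diagnostic above is just the mean absolute gap between the (detached) learning signal and the surrogate's prediction. A standalone sketch with made-up tensors:

import torch

f = torch.randn(64, 1)           # hypothetical learning signal
surr_pred = torch.randn(64, 1)   # hypothetical surrogate prediction
surr_dif = torch.mean(torch.abs(f.detach() - surr_pred))
print(surr_dif)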
Example 6: argwhere_nonzero
def argwhere_nonzero(layer, batchnorm=False):
indices=[]
# for batchnorms we want to do the opposite
if batchnorm:
for idx,w in enumerate(layer):
if torch.sum(torch.abs(w)).data.cpu().numpy() == 0.:
indices.append(idx)
else:
for idx,w in enumerate(layer):
if torch.sum(torch.abs(w)).data.cpu().numpy() != 0.:
indices.append(idx)
return indices
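A small usage sketch of the helper above (the weight tensor is made up): the absolute-value sum of a filter is zero only when the entire filter has been pruned to zero.

import torch

conv_weight = torch.randn(8, 3, 3, 3)   # hypothetical conv weights, 8 filters
conv_weight[2] = 0.0                    # simulate one pruned filter
print(argwhere_nonzero(conv_weight))    # indices of the 7 surviving filters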
Example 7: test_NormalQMCEngineShapiroInvTransform
def test_NormalQMCEngineShapiroInvTransform(self):
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
Example 8: pairwise_distance
def pairwise_distance(x1, x2, p=2, eps=1e-6):
r"""
Computes the batchwise pairwise distance between vectors x1, x2:
.. math ::
\Vert x \Vert _p := \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}
Args:
x1: first input tensor
x2: second input tensor
p: the norm degree. Default: 2
eps (float, optional): Small value to avoid division by zero. Default: 1e-6
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: :math:`(N, 1)`
Example::
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> output = F.pairwise_distance(input1, input2, p=2)
>>> output.backward()
"""
assert x1.size() == x2.size(), "Input sizes must be equal."
assert x1.dim() == 2, "Input must be a 2D matrix."
diff = torch.abs(x1 - x2)
out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
return torch.pow(out, 1. / p)
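For reference, current PyTorch ships an equivalent built-in, torch.nn.functional.pairwise_distance. A minimal usage sketch with random inputs:

import torch
import torch.nn.functional as F

a = torch.randn(100, 128)
b = torch.randn(100, 128)
d = F.pairwise_distance(a, b, p=2)   # shape [100], same idea as the function above
print(d.shape)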
Example 9: _mu_law
def _mu_law(self, x):
m = self._variable(torch.FloatTensor(1))
m[:] = self.n_categories + 1
s = torch.sign(x)
x = torch.abs(x)
x = s * (torch.log(1 + (self.n_categories * x)) / torch.log(m))
return x
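A self-contained sketch of the same companding formula, without the class state (mu here plays the role of self.n_categories; the input signal is made up):

import torch

def mu_law_encode(x, mu=255):
    x = torch.clamp(x, -1.0, 1.0)
    # sign(x) * log(1 + mu*|x|) / log(1 + mu), as in _mu_law above
    return torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(torch.tensor(float(mu)))

audio = torch.rand(16000) * 2 - 1   # hypothetical signal in [-1, 1]
encoded = mu_law_encode(audio)
print(encoded.min().item(), encoded.max().item())   # stays within [-1, 1]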
Example 10: forward
def forward(self, x1, x2):
out1 = self.forward_one(x1)
out2 = self.forward_one(x2)
dis = torch.abs(out1 - out2)
out = self.out(dis)
# return self.sigmoid(out)
return out
Example 11: skewness_score
def skewness_score(x, dim=0):
'''Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Adapted from `scipy.stats.skewtest`.
Args:
    x: Tensor of sample data.
    dim: Dimension along which to compute the test. Default is 0. If None,
        compute over the whole tensor `x`.
Returns:
    statistic: The computed z-score for this test.
    p-value: The 2-sided p-value for the hypothesis test.
'''
x, n, dim = _x_n_dim(x, dim)
b2 = (x**3).mean(dim) / (x**2).mean(dim)**1.5
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = 3.0 * (n**2 + 27 * n - 70) * (n + 1) * (n + 3) /\
((n - 2.0) * (n + 5) * (n + 7) * (n + 9))
W2 = -1.0 + math.sqrt(2 * (beta2 - 1))
delta = 1.0 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y[y == 0] = 1
yalpha = y / alpha
Z = delta * torch.log(yalpha + torch.sqrt(yalpha**2 + 1))
return Z, 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))
Example 12: forward
def forward(self, frame, policies):
# x: [B,2,84,84]
self.B = frame.size()[0]
#Predict mask
pre_mask = self.predict_mask_nosigmoid(frame)
mask = F.sigmoid(pre_mask)
masked_frame = frame * mask
kls = []
for i in range(len(policies)):
policy = policies[i]
log_dist_mask = policy.action_logdist(masked_frame)
log_dist_true = policy.action_logdist(frame)
action_dist_kl = torch.sum((log_dist_true - log_dist_mask)*torch.exp(log_dist_true), dim=1) #[B]
action_dist_kl = torch.mean(action_dist_kl) # * 1000
kls.append(action_dist_kl)
kls = torch.stack(kls) #[policies]
action_dist_kl = torch.mean(kls) #[1] # mean over batch and over policies
pre_mask = pre_mask.view(self.B, -1)
mask_cost = torch.abs(pre_mask + 20)
# mask_sum = torch.mean(torch.sum(mask_cost, dim=1)) * .00001
# mask_cost = torch.mean(mask_cost) * .00001
mask_cost = torch.mean(mask_cost) * .01
loss = action_dist_kl + mask_cost
return loss, action_dist_kl, mask_cost
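One way to read the mask_cost term above: sigmoid(-20) is effectively 0, so penalizing torch.abs(pre_mask + 20) pulls the pre-sigmoid logits toward -20 and pushes the mask toward all-zeros, acting as a sparsity pressure. A standalone sketch with made-up logits:

import torch

pre_mask = torch.randn(2, 84 * 84)                  # hypothetical pre-sigmoid mask logits
mask_cost = torch.abs(pre_mask + 20).mean() * .01   # smallest when the logits sit at -20
print(mask_cost)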
Example 13: test_train
def test_train(self):
self._metric.train()
calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
[torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
for i in range(len(self._states)):
self._metric.process(self._states[i])
self.assertEqual(2, len(self._metric_function.call_args_list))
for i in range(len(self._metric_function.call_args_list)):
self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
self._metric_function.reset_mock()
self._metric.process_final({})
self._metric_function.assert_called_once()
self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
Example 14: test_mu_law_companding
def test_mu_law_companding(self):
sig = self.sig.clone()
quantization_channels = 256
sig = self.sig.numpy()
sig = sig / np.abs(sig).max()
self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)
sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)
sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)
sig = self.sig.clone()
sig = sig / torch.abs(sig).max()
self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)
sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)
sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)
repr_test = transforms.MuLawEncoding(quantization_channels)
repr_test.__repr__()
repr_test = transforms.MuLawExpanding(quantization_channels)
repr_test.__repr__()
Example 15: test_regularization
def test_regularization(self):
penalty = self.model.get_regularization_penalty().data
assert (penalty > 0).all()
penalty2 = 0
# Config specifies penalty as
# "regularizer": [
# ["weight$", {"type": "l2", "alpha": 10}],
# ["bias$", {"type": "l1", "alpha": 5}]
# ]
for name, parameter in self.model.named_parameters():
if name.endswith("weight"):
weight_penalty = 10 * torch.sum(torch.pow(parameter, 2))
penalty2 += weight_penalty
elif name.endswith("bias"):
bias_penalty = 5 * torch.sum(torch.abs(parameter))
penalty2 += bias_penalty
assert (penalty == penalty2.data).all()
# You get a RuntimeError if you call `model.forward` twice on the same inputs.
# The data and config are such that the whole dataset is one batch.
training_batch = next(self.iterator(self.instances, num_epochs=1))
validation_batch = next(self.iterator(self.instances, num_epochs=1))
training_loss = self.trainer._batch_loss(training_batch, for_training=True).data
validation_loss = self.trainer._batch_loss(validation_batch, for_training=False).data
# Training loss should have the regularization penalty, but validation loss should not.
assert (training_loss != validation_loss).all()
# Training loss should equal the validation loss plus the penalty.
penalized = validation_loss + penalty
assert (training_loss == penalized).all()
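A standalone sketch of the penalty arithmetic the test re-derives (made-up parameters, same alphas as in the config comment above):

import torch

weight = torch.randn(10, 10)
bias = torch.randn(10)
l2_penalty = 10 * torch.sum(torch.pow(weight, 2))   # ["weight$", {"type": "l2", "alpha": 10}]
l1_penalty = 5 * torch.sum(torch.abs(bias))         # ["bias$", {"type": "l1", "alpha": 5}]
print((l2_penalty + l1_penalty).item())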