This article collects typical usage examples of the torch.sqrt function in Python. If you are wondering what torch.sqrt does, how to call it, or what it looks like in real code, the curated examples below may help.
The 15 code examples that follow are ordered roughly by popularity.
Example 1: _gaussian_kl_divergence
def _gaussian_kl_divergence(self, p, q):
    p_mean = p[0][:Z_DIM]
    p_logstd = p[0][Z_DIM:]
    p_var = T.sqrt(T.exp(p_logstd))
    q_mean = q[0][:Z_DIM]
    q_logstd = q[0][Z_DIM:]
    q_var = T.sqrt(T.exp(q_logstd))
    kl = (T.log(q_var / p_var) + (p_var + (p_mean - q_mean) * (p_mean - q_mean)) / q_var - 1) * 0.5
    return T.sum(kl)
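For comparison, a minimal self-contained sketch of a diagonal-Gaussian KL term written directly against torch; the function name, tensor shapes, and z_dim value below are illustrative and not taken from the example's repository:

import torch

def gaussian_kl(p_mean, p_logvar, q_mean, q_logvar):
    # KL(p || q) for two diagonal Gaussians given mean and log-variance
    p_std = torch.sqrt(torch.exp(p_logvar))
    q_std = torch.sqrt(torch.exp(q_logvar))
    kl = (torch.log(q_std / p_std)
          + (p_std ** 2 + (p_mean - q_mean) ** 2) / (2.0 * q_std ** 2)
          - 0.5)
    return kl.sum()

z_dim = 8
kl_value = gaussian_kl(torch.zeros(z_dim), torch.zeros(z_dim),
                       torch.randn(z_dim), torch.randn(z_dim))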
Example 2: rmsprop
def rmsprop(opfunc, x, config, state=None):
    """An implementation of RMSprop.

    ARGS:
    - 'opfunc' : a function that takes a single input (X), the point
                 of evaluation, and returns f(X) and df/dX
    - 'x'      : the initial point
    - 'config' : a table with configuration parameters for the optimizer
    - 'config['learningRate']' : learning rate
    - 'config['alpha']'        : smoothing constant
    - 'config['epsilon']'      : value with which to initialise m
    - 'config['weightDecay']'  : weight decay
    - 'state'  : a table describing the state of the optimizer;
                 after each call the state is modified
    - 'state['m']'   : leaky sum of squares of parameter gradients
    - 'state['tmp']' : its square root (with epsilon smoothing)

    RETURN:
    - `x`    : the new x vector
    - `f(x)` : the function, evaluated before the update
    """
    # (0) get/update state
    if config is None and state is None:
        raise ValueError("rmsprop requires a dictionary to retain state between iterations")
    state = state if state is not None else config
    lr = config.get('learningRate', 1e-2)
    alpha = config.get('alpha', 0.99)
    epsilon = config.get('epsilon', 1e-8)
    wd = config.get('weightDecay', 0)

    # (1) evaluate f(x) and df/dx
    fx, dfdx = opfunc(x)

    # (2) weight decay
    if wd != 0:
        dfdx.add_(wd, x)

    # (3) initialize mean square values and square gradient storage
    if 'm' not in state:
        state['m'] = x.new().resize_as_(dfdx).zero_()
        state['tmp'] = x.new().resize_as_(dfdx)

    # (4) calculate new (leaky) mean squared values
    state['m'].mul_(alpha)
    state['m'].addcmul_(1.0 - alpha, dfdx, dfdx)

    # (5) perform update
    torch.sqrt(state['m'], out=state['tmp']).add_(epsilon)
    x.addcdiv_(-lr, dfdx, state['tmp'])

    # return x*, f(x) before optimization
    return x, fx
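A hypothetical way to exercise the function above on a simple quadratic objective; the opfunc closure and the learning rate are made up, and the call assumes a PyTorch release that still accepts the legacy add_/addcmul_/addcdiv_ (value, tensor) overloads used inside rmsprop:

import torch

def opfunc(x):
    # f(x) = 0.5 * ||x||^2, so df/dx = x
    return 0.5 * (x ** 2).sum().item(), x.clone()

x = torch.randn(10)
config = {'learningRate': 1e-1}
for _ in range(100):
    x, fx = rmsprop(opfunc, x, config)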
Example 3: triplet_loss
def triplet_loss(self, z_p, z_n, z_d, margin=0.1, l2=0):
    l_n = torch.sqrt(((z_p - z_n) ** 2).sum(dim=1))
    l_d = - torch.sqrt(((z_p - z_d) ** 2).sum(dim=1))
    l_nd = l_n + l_d
    loss = F.relu(l_n + l_d + margin)
    l_n = torch.mean(l_n)
    l_d = torch.mean(l_d)
    l_nd = torch.mean(l_n + l_d)
    loss = torch.mean(loss)
    if l2 != 0:
        loss += l2 * (torch.norm(z_p) + torch.norm(z_n) + torch.norm(z_d))
    return loss, l_n, l_d, l_nd
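The heart of the example is a Euclidean triplet margin; a minimal standalone version of that distance computation with torch.sqrt (batch size, embedding width, and margin are arbitrary):

import torch
import torch.nn.functional as F

z_p = torch.randn(32, 128)   # anchor embeddings
z_n = torch.randn(32, 128)   # neighbor (positive) embeddings
z_d = torch.randn(32, 128)   # distant (negative) embeddings

d_near = torch.sqrt(((z_p - z_n) ** 2).sum(dim=1))
d_far = torch.sqrt(((z_p - z_d) ** 2).sum(dim=1))
loss = F.relu(d_near - d_far + 0.1).mean()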
Example 4: save_conv_shrink_bn
def save_conv_shrink_bn(fp, conv_model, bn_model, eps=1e-5):
    if bn_model.bias.is_cuda:
        bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)
        convert2cpu(bias).numpy().tofile(fp)
        s = conv_model.weight.data.size()
        weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1, 1, 1, 1).repeat(1, s[1], s[2], s[3])
        convert2cpu(weight).numpy().tofile(fp)
    else:
        bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)
        bias.numpy().tofile(fp)
        s = conv_model.weight.data.size()
        weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1, 1, 1, 1).repeat(1, s[1], s[2], s[3])
        weight.numpy().tofile(fp)
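For context, the example folds a BatchNorm2d into the preceding convolution before serializing the weights; a rough sketch of the same algebra on in-memory modules (layer sizes are illustrative):

import torch
import torch.nn as nn

conv = nn.Conv2d(16, 32, kernel_size=3, bias=False)
bn = nn.BatchNorm2d(32).eval()

with torch.no_grad():
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)   # per-channel gamma / sigma
    fused_weight = conv.weight * scale.view(-1, 1, 1, 1)
    fused_bias = bn.bias - bn.running_mean * scale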
Example 5: ldmk_loss
def ldmk_loss(input, target, weight=None, size_average=True):
    n, c = input.size()
    loss_ = (input - target) ** 2
    iod = torch.sqrt(torch.sum(
        (target[:, 36*2:37*2] - target[:, 45*2:46*2]) ** 2, 1))
    loss = torch.autograd.Variable(torch.zeros((n, c // 2))).float().cuda()
    for i in range(c // 2):
        loss[:, i] = torch.sqrt(loss_[:, i*2] + loss_[:, i*2+1]) / (iod + 1e-6)
    if size_average:
        loss = torch.mean(loss)
    return loss
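The example normalizes per-landmark errors by the inter-ocular distance (landmarks 36 and 45 in the 68-point convention); a compact vectorized sketch of the same idea without the Python loop (shapes are illustrative):

import torch

pred = torch.randn(8, 136)      # 68 flattened (x, y) landmarks per sample
target = torch.randn(8, 136)

pred_xy = pred.view(-1, 68, 2)
target_xy = target.view(-1, 68, 2)

iod = torch.sqrt(((target_xy[:, 36] - target_xy[:, 45]) ** 2).sum(dim=1))   # inter-ocular distance
per_point = torch.sqrt(((pred_xy - target_xy) ** 2).sum(dim=2))             # (batch, 68) errors
loss = (per_point / (iod.unsqueeze(1) + 1e-6)).mean()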
Example 6: test
def test():
    network.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        target_indices = target
        target_one_hot = to_one_hot(target_indices, length=network.digits.num_units)
        data, target = Variable(data, volatile=True).cuda(), Variable(target_one_hot).cuda()
        output = network(data)
        test_loss += network.loss(data, output, target, size_average=False).data[0]  # sum up batch loss
        v_mag = torch.sqrt((output ** 2).sum(dim=2, keepdim=True))
        pred = v_mag.data.max(1, keepdim=True)[1].cpu()
        correct += pred.eq(target_indices.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss,
        correct,
        len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
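The prediction step reduces each capsule's output vector to its length with torch.sqrt; a small standalone sketch of just that step (batch size, capsule count, and vector width are made up):

import torch

output = torch.randn(4, 10, 16, 1)                         # (batch, capsules, vector dim, 1)
v_mag = torch.sqrt((output ** 2).sum(dim=2, keepdim=True))
pred = v_mag.max(dim=1, keepdim=True)[1].squeeze()          # index of the longest capsule per sample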
Example 7: test_hmc_conjugate_gaussian
def test_hmc_conjugate_gaussian(fixture,
                                num_samples,
                                warmup_steps,
                                hmc_params,
                                expected_means,
                                expected_precs,
                                mean_tol,
                                std_tol):
    pyro.get_param_store().clear()
    hmc_kernel = HMC(fixture.model, **hmc_params)
    mcmc_run = MCMC(hmc_kernel, num_samples, warmup_steps).run(fixture.data)
    for i in range(1, fixture.chain_len + 1):
        param_name = 'loc_' + str(i)
        marginal = EmpiricalMarginal(mcmc_run, sites=param_name)
        latent_loc = marginal.mean
        latent_std = marginal.variance.sqrt()
        expected_mean = torch.ones(fixture.dim) * expected_means[i - 1]
        expected_std = 1 / torch.sqrt(torch.ones(fixture.dim) * expected_precs[i - 1])
        # Actual vs expected posterior means for the latents
        logger.info('Posterior mean (actual) - {}'.format(param_name))
        logger.info(latent_loc)
        logger.info('Posterior mean (expected) - {}'.format(param_name))
        logger.info(expected_mean)
        assert_equal(rmse(latent_loc, expected_mean).item(), 0.0, prec=mean_tol)
        # Actual vs expected posterior standard deviations for the latents
        logger.info('Posterior std (actual) - {}'.format(param_name))
        logger.info(latent_std)
        logger.info('Posterior std (expected) - {}'.format(param_name))
        logger.info(expected_std)
        assert_equal(rmse(latent_std, expected_std).item(), 0.0, prec=std_tol)
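The single torch.sqrt call here converts a precision (inverse variance) into a standard deviation; isolated, with an arbitrary precision value:

import torch

precision = torch.full((3,), 4.0)
std = 1.0 / torch.sqrt(precision)   # tensor([0.5000, 0.5000, 0.5000])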
Example 8: forward
def forward(self, input, label):
    # --------------------------- cos(theta) & phi(theta) ---------------------------
    if self.device_id == None:
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
    else:
        x = input
        sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
        temp_x = x.cuda(self.device_id[0])
        weight = sub_weights[0].cuda(self.device_id[0])
        cosine = F.linear(F.normalize(temp_x), F.normalize(weight))
        for i in range(1, len(self.device_id)):
            temp_x = x.cuda(self.device_id[i])
            weight = sub_weights[i].cuda(self.device_id[i])
            cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1)
    sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
    phi = cosine * self.cos_m - sine * self.sin_m
    if self.easy_margin:
        phi = torch.where(cosine > 0, phi, cosine)
    else:
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)
    # --------------------------- convert label to one-hot ---------------------------
    one_hot = torch.zeros(cosine.size())
    if self.device_id != None:
        one_hot = one_hot.cuda(self.device_id[0])
    one_hot.scatter_(1, label.view(-1, 1).long(), 1)
    # ------------- torch.where(out_i = {x_i if condition_i else y_i}) -------------
    output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # you can use torch.where if your torch.__version__ is 0.4
    output *= self.s
    return output
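The torch.sqrt call in this ArcFace-style head recovers sin(theta) from cos(theta) so the additive angular margin cos(theta + m) can be formed; a self-contained sketch of that piece (the margin value and logit shape are illustrative, and the clamp guards against tiny negative values that the original omits):

import math
import torch

m = 0.5                                      # additive angular margin
cosine = torch.rand(8, 10) * 2 - 1           # stand-in cos(theta) logits in [-1, 1]
sine = torch.sqrt((1.0 - cosine ** 2).clamp(min=0))
phi = cosine * math.cos(m) - sine * math.sin(m)   # cos(theta + m)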
Example 9: pullaway_loss
def pullaway_loss(embeddings):
    norm = torch.sqrt(torch.sum(embeddings ** 2.0, 1, keepdim=True))
    normalized_embeddings = embeddings / norm
    similarity = torch.matmul(normalized_embeddings, normalized_embeddings.transpose(1, 0))
    batch_size = embeddings.size()[0]
    pt_loss = (torch.sum(similarity) - batch_size) / (batch_size * (batch_size - 1))
    return pt_loss
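A quick illustrative call to the function above with random embeddings; note that the explicit torch.sqrt-based row normalization is essentially what F.normalize(embeddings, p=2, dim=1) computes:

import torch

embeddings = torch.randn(16, 64)
loss = pullaway_loss(embeddings)   # scalar pull-away penalty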
Example 10: skewness_score
def skewness_score(x, dim=0):
    '''Test whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.
    Ripoff of `scipy.stats.skewtest`.

    Args:
        x: Array of the sample data.
        dim: Dimension along which to compute the test. Default is 0.
             If None, compute over the whole array `x`.

    Returns:
        statistic: The computed z-score for this test.
        p-value: A 2-sided chi squared probability for the hypothesis test.
    '''
    x, n, dim = _x_n_dim(x, dim)
    b2 = (x ** 3).mean(dim) / (x ** 2).mean(dim) ** 1.5
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = 3.0 * (n ** 2 + 27 * n - 70) * (n + 1) * (n + 3) / \
        ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))
    W2 = -1.0 + math.sqrt(2 * (beta2 - 1))
    delta = 1.0 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    y[y == 0] = 1
    yalpha = y / alpha
    Z = delta * torch.log(yalpha + torch.sqrt(yalpha ** 2 + 1))
    return Z, 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))
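As a side note, the closing torch.log(yalpha + torch.sqrt(yalpha ** 2 + 1)) expression is the inverse hyperbolic sine, so on a recent PyTorch the same quantity can be written as torch.asinh; a tiny equivalence check on random input (not part of the original code):

import torch

y = torch.randn(5)
assert torch.allclose(torch.log(y + torch.sqrt(y ** 2 + 1)), torch.asinh(y))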
Example 11: create_input
def create_input(points, sigma2):
    bs, N, _ = points.size()  # points has size (bs, N, 2)
    OP = torch.zeros(bs, N, N, 4).type(dtype)
    E = torch.eye(N).type(dtype).unsqueeze(0).expand(bs, N, N)
    OP[:, :, :, 0] = E
    W = points.unsqueeze(1).expand(bs, N, N, dim) - points.unsqueeze(2).expand(bs, N, N, dim)
    dists2 = (W * W).sum(3)
    dists = torch.sqrt(dists2)
    W = torch.exp(-dists2 / sigma2)
    OP[:, :, :, 1] = W
    D = E * W.sum(2, True).expand(bs, N, N)
    OP[:, :, :, 2] = D
    U = (torch.ones(N, N).type(dtype) / N).unsqueeze(0).expand(bs, N, N)
    OP[:, :, :, 3] = U
    OP = Variable(OP)
    x = Variable(points)
    Y = Variable(W.clone())
    # Normalize inputs
    if normalize:
        mu = x.sum(1) / N
        mu_ext = mu.unsqueeze(1).expand_as(x)
        var = ((x - mu_ext) * (x - mu_ext)).sum(1) / N
        var_ext = var.unsqueeze(1).expand_as(x)
        x = x - mu_ext
        x = x / (10 * var_ext)
    return (OP, x, Y), dists
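The torch.sqrt call in create_input is just a batched pairwise Euclidean distance matrix; a minimal sketch of that piece alone (batch size, point count, and sigma^2 are arbitrary):

import torch

points = torch.randn(2, 50, 2)                        # (batch, N, 2)
diff = points.unsqueeze(1) - points.unsqueeze(2)       # (batch, N, N, 2)
dists2 = (diff ** 2).sum(dim=3)
dists = torch.sqrt(dists2)                             # equivalent to torch.cdist(points, points)
W = torch.exp(-dists2 / 1.0)                           # Gaussian affinity with sigma^2 = 1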
Example 12: forward
def forward(self, input1):
    self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())
    for i in range(input1.size(0)):
        self.batchgrid3d[i] = self.grid3d
    self.batchgrid3d = Variable(self.batchgrid3d)

    x = torch.sum(torch.mul(self.batchgrid3d, input1[:, :, :, 0:4]), 3)
    y = torch.sum(torch.mul(self.batchgrid3d, input1[:, :, :, 4:8]), 3)
    z = torch.sum(torch.mul(self.batchgrid3d, input1[:, :, :, 8:]), 3)

    r = torch.sqrt(x ** 2 + y ** 2 + z ** 2) + 1e-5

    theta = torch.acos(z / r) / (np.pi / 2) - 1
    # phi = torch.atan(y / x)
    phi = torch.atan(y / (x + 1e-5)) + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
    phi = phi / np.pi

    output = torch.cat([theta, phi], 3)
    return output
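A standalone sketch of the same Cartesian-to-spherical mapping, using torch.atan2 in place of the sign-correction trick around torch.atan (the coordinate tensors are random placeholders):

import math
import torch

x, y, z = torch.randn(3, 100).unbind(0)
r = torch.sqrt(x ** 2 + y ** 2 + z ** 2) + 1e-5
theta = torch.acos(z / r) / (math.pi / 2) - 1     # polar angle rescaled to [-1, 1]
phi = torch.atan2(y, x) / math.pi                  # azimuth rescaled to (-1, 1]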
Example 13: get_negative_expectation
def get_negative_expectation(q_samples, measure, average=True):
    log_2 = math.log(2.)
    if measure == 'GAN':
        Eq = F.softplus(-q_samples) + q_samples
    elif measure == 'JSD':
        Eq = F.softplus(-q_samples) + q_samples - log_2
    elif measure == 'X2':
        Eq = -0.5 * ((torch.sqrt(q_samples ** 2) + 1.) ** 2)
    elif measure == 'KL':
        Eq = torch.exp(q_samples)
    elif measure == 'RKL':
        Eq = q_samples - 1.
    elif measure == 'DV':
        Eq = log_sum_exp(q_samples, 0) - math.log(q_samples.size(0))
    elif measure == 'H2':
        Eq = torch.exp(q_samples) - 1.
    elif measure == 'W1':
        Eq = q_samples
    else:
        raise_measure_error(measure)

    if average:
        return Eq.mean()
    else:
        return Eq
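Only the 'X2' branch uses torch.sqrt, and there torch.sqrt(q_samples ** 2) is simply an elementwise absolute value; a quick check on random data:

import torch

q = torch.randn(4, 4)
assert torch.allclose(torch.sqrt(q ** 2), torch.abs(q))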
Example 14: forward
def forward(self, x, y, xidx=None, yidx=None):
    K = torch.sqrt(l2_distance(x, y))
    u, v = self._get_uv(x, y, xidx, yidx)
    if self.regularization == 'entropy':
        return torch.exp((u[:, None] + v[None, :] - K) / self.alpha)
    else:
        return torch.clamp((u[:, None] + v[None, :] - K),
                           min=0) / (2 * self.alpha)
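In this Sinkhorn-style layer, torch.sqrt turns a squared-distance matrix into a Euclidean cost matrix; a standalone sketch of that step, assuming the l2_distance helper in the example returns squared pairwise distances (the shapes below are arbitrary):

import torch

x = torch.randn(5, 3)
y = torch.randn(7, 3)
sq_dists = ((x[:, None, :] - y[None, :, :]) ** 2).sum(dim=2)   # (5, 7) squared L2 distances
K = torch.sqrt(sq_dists)                                       # Euclidean cost matrix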
Example 15: forward
def forward(self, input):
    # Hack: Force noise vectors to be function of input so they are put into
    # predict_net and not init_net when tracing with ONNX
    epsilon_input = torch.randn(1, input.size()[1], device=input.device)
    epsilon_output = torch.randn(
        self.out_dimension - input.size()[1] + input.size()[1],
        1,
        device=input.device,
    )
    epsilon_in = torch.sign(epsilon_input) * torch.sqrt(torch.abs(epsilon_input))
    epsilon_out = torch.sign(epsilon_output) * torch.sqrt(torch.abs(epsilon_output))

    # Add noise to bias and weights
    noise = torch.mul(epsilon_in, epsilon_out)
    bias = self.bias + self.sigma_bias * epsilon_out.t()
    weight = self.weight + self.sigma_weight * noise
    return input.matmul(weight.t()) + bias
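The sign(x) * sqrt(|x|) transform is the factorized Gaussian noise used by NoisyNet-style linear layers; a condensed sketch of how the per-input and per-output noise vectors combine into a weight-shaped noise matrix (the dimensions are illustrative):

import torch

def f(x):
    # factorized-noise transform: sign(x) * sqrt(|x|)
    return torch.sign(x) * torch.sqrt(torch.abs(x))

in_features, out_features = 4, 3
eps_in = f(torch.randn(1, in_features))
eps_out = f(torch.randn(out_features, 1))
weight_noise = eps_out * eps_in    # (out_features, in_features) via broadcasting
bias_noise = eps_out.t()           # (1, out_features)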