This article collects typical usage examples of the torch.zeros_like method in Python. If you are wondering how torch.zeros_like works, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples from the torch module to which this method belongs.
The sections below present 15 code examples of the torch.zeros_like method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
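Before the project examples, here is a minimal, self-contained sketch written for this article (not taken from any of the projects below) of what torch.zeros_like does: it allocates a new tensor filled with zeros that matches the input's shape and, by default, also its dtype, device and layout.

import torch

x = torch.randn(2, 3, dtype=torch.float64)

# Same shape, dtype and device as x, filled with zeros.
z = torch.zeros_like(x)
assert z.shape == x.shape and z.dtype == x.dtype and z.device == x.device

# dtype (and device) can be overridden explicitly.
zi = torch.zeros_like(x, dtype=torch.int32)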
Example 1: negative_bag_loss
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def negative_bag_loss(self, cls_prob, box_prob):
"""Compute negative bag loss.
:math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.
:math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples.
:math:`P_{j}^{bg}`: Classification probability of negative samples.
Args:
cls_prob (Tensor): Classification probability, in shape
(num_img, num_anchors, num_classes).
box_prob (Tensor): Box probability, in shape
(num_img, num_anchors, num_classes).
Returns:
Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
""" # noqa: E501, W605
prob = cls_prob * (1 - box_prob)
negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
prob, torch.zeros_like(prob), reduction='none')
return (1 - self.alpha) * negative_bag_loss
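As a standalone illustration of the expression above (not part of the original project), the sketch below evaluates the same negative bag loss term with assumed values gamma=2.0 and alpha=0.5, since the class attributes self.gamma and self.alpha are not shown in this excerpt:

import torch
import torch.nn.functional as F

gamma, alpha = 2.0, 0.5           # assumed values for illustration
cls_prob = torch.rand(2, 8, 4)    # (num_img, num_anchors, num_classes)
box_prob = torch.rand(2, 8, 4)

prob = cls_prob * (1 - box_prob)
neg_loss = (1 - alpha) * prob**gamma * F.binary_cross_entropy(
    prob, torch.zeros_like(prob), reduction='none')
print(neg_loss.shape)             # torch.Size([2, 8, 4])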
Example 2: loss
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def loss(self, point_pred, point_targets, labels):
"""Calculate loss for MaskPointHead.
Args:
            point_pred (Tensor): Point prediction result, shape
                (num_rois, num_classes, num_points).
            point_targets (Tensor): Point targets, shape (num_rois, num_points).
labels (Tensor): Class label of corresponding boxes,
shape (num_rois, )
Returns:
dict[str, Tensor]: a dictionary of point loss components
"""
loss = dict()
if self.class_agnostic:
loss_point = self.loss_point(point_pred, point_targets,
torch.zeros_like(labels))
else:
loss_point = self.loss_point(point_pred, point_targets, labels)
loss['loss_point'] = loss_point
return loss
Example 3: backward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def backward(ctx, grad_output):
assert grad_output.is_cuda
features, masks, rfeatures = ctx.saved_tensors
kernel_size = ctx.kernel_size
group_size = ctx.group_size
scale_factor = ctx.scale_factor
rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
rgrad_input = torch.zeros_like(features, requires_grad=False)
rgrad_masks = torch.zeros_like(masks, requires_grad=False)
grad_input = torch.zeros_like(features, requires_grad=False)
grad_masks = torch.zeros_like(masks, requires_grad=False)
carafe_ext.backward(grad_output.contiguous(), rfeatures, masks,
kernel_size, group_size, scale_factor,
rgrad_output, rgrad_input_hs, rgrad_input,
rgrad_masks, grad_input, grad_masks)
return grad_input, grad_masks, None, None, None, None
Example 4: backward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError
data, rois, offset = ctx.saved_tensors
output_count = ctx.output_count
grad_input = torch.zeros_like(data)
grad_rois = None
grad_offset = torch.zeros_like(offset)
deform_pool_ext.deform_psroi_pooling_backward(
grad_output, data, rois, offset, output_count, grad_input,
grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
ctx.trans_std)
return (grad_input, grad_rois, grad_offset, None, None, None, None,
None, None, None, None)
Example 5: backward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
grad_mask = torch.zeros_like(mask)
grad_weight = torch.zeros_like(weight)
grad_bias = torch.zeros_like(bias)
deform_conv_ext.modulated_deform_conv_backward(
input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1],
grad_input, grad_weight, grad_bias, grad_offset, grad_mask,
grad_output, weight.shape[2], weight.shape[3], ctx.stride,
ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
ctx.groups, ctx.deformable_groups, ctx.with_bias)
if not ctx.with_bias:
grad_bias = None
return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
None, None, None, None, None)
Example 6: forward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def forward(self, x, target):
        similarity_matrix = x @ x.T  # need grad here
label_matrix = target.unsqueeze(1) == target.unsqueeze(0)
negative_matrix = label_matrix.logical_not()
positive_matrix = label_matrix.fill_diagonal_(False)
sp = torch.where(positive_matrix, similarity_matrix,
torch.zeros_like(similarity_matrix))
sn = torch.where(negative_matrix, similarity_matrix,
torch.zeros_like(similarity_matrix))
ap = torch.clamp_min(1 + self.m - sp.detach(), min=0.)
an = torch.clamp_min(sn.detach() + self.m, min=0.)
logit_p = -self.gamma * ap * (sp - self.dp)
logit_n = self.gamma * an * (sn - self.dn)
logit_p = torch.where(positive_matrix, logit_p,
torch.zeros_like(logit_p))
logit_n = torch.where(negative_matrix, logit_n,
torch.zeros_like(logit_n))
loss = F.softplus(torch.logsumexp(logit_p, dim=1) +
torch.logsumexp(logit_n, dim=1)).mean()
return loss
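To make the masking logic above concrete, here is a small, self-written sketch (independent of the original class) that builds the same positive/negative pair matrices for a toy batch of four embeddings; torch.zeros_like supplies the padding values fed to torch.where:

import torch
import torch.nn.functional as F

x = F.normalize(torch.randn(4, 8), dim=1)        # 4 embeddings
target = torch.tensor([0, 0, 1, 1])               # 2 samples per class

similarity_matrix = x @ x.T
label_matrix = target.unsqueeze(1) == target.unsqueeze(0)
negative_matrix = label_matrix.logical_not()
positive_matrix = label_matrix.fill_diagonal_(False)

sp = torch.where(positive_matrix, similarity_matrix,
                 torch.zeros_like(similarity_matrix))
print(positive_matrix.int())   # 1 marks same-label pairs, diagonal excluded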
Example 7: forward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def forward(self, image, boxes, box_ind):
crops = torch.zeros_like(image)
if image.is_cuda:
_backend.crop_and_resize_gpu_forward(
image, boxes, box_ind,
self.extrapolation_value, self.crop_height, self.crop_width, self.crop_zdepth, crops)
else:
_backend.crop_and_resize_forward(
image, boxes, box_ind,
self.extrapolation_value, self.crop_height, self.crop_width, self.crop_zdepth, crops)
# save for backward
self.im_size = image.size()
self.save_for_backward(boxes, box_ind)
return crops
Example 8: backward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def backward(self, grad_outputs):
boxes, box_ind = self.saved_tensors
grad_outputs = grad_outputs.contiguous()
grad_image = torch.zeros_like(grad_outputs).resize_(*self.im_size)
if grad_outputs.is_cuda:
_backend.crop_and_resize_gpu_backward(
grad_outputs, boxes, box_ind, grad_image
)
else:
_backend.crop_and_resize_backward(
grad_outputs, boxes, box_ind, grad_image
)
return grad_image, None, None
Example 9: forward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def forward(self, image, boxes, box_ind):
crops = torch.zeros_like(image)
if image.is_cuda:
_backend.crop_and_resize_gpu_forward(
image, boxes, box_ind,
self.extrapolation_value, self.crop_height, self.crop_width, crops)
else:
_backend.crop_and_resize_forward(
image, boxes, box_ind,
self.extrapolation_value, self.crop_height, self.crop_width, crops)
# save for backward
self.im_size = image.size()
self.save_for_backward(boxes, box_ind)
return crops
Example 10: __init__
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def __init__(self, params, eta=required, momentum=0, weight_decay=0, eps=1e-5):
if eta is not required and eta <= 0.0:
raise ValueError("Invalid eta: {}".format(eta))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(eta=eta, momentum=momentum, weight_decay=weight_decay)
super(DFW, self).__init__(params, defaults)
self.eps = eps
for group in self.param_groups:
if group['momentum']:
for p in group['params']:
self.state[p]['momentum_buffer'] = torch.zeros_like(p.data, requires_grad=False)
Example 11: __init__
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def __init__(self, params, eta=required, momentum=0, weight_decay=0, eps=1e-5):
if eta is not required and eta <= 0.0:
raise ValueError("Invalid eta: {}".format(eta))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(eta=eta, momentum=momentum, weight_decay=weight_decay)
super(BPGrad, self).__init__(params, defaults)
self.eps = eps
for group in self.param_groups:
group['L'] = 1. / group['eta']
if group['momentum']:
for p in group['params']:
self.state[p]['v'] = torch.zeros_like(p.data, requires_grad=False)
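Both optimizers above follow the standard torch.optim construction pattern; the sketch below only shows how they would be instantiated under that assumption (their step() methods are not part of these excerpts), with eta chosen arbitrarily:

import torch

model = torch.nn.Linear(10, 2)
# Momentum buffers are pre-allocated with torch.zeros_like in __init__.
optimizer = DFW(model.parameters(), eta=0.1, momentum=0.9)
# optimizer = BPGrad(model.parameters(), eta=0.1, momentum=0.9)  # same pattern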
Example 12: maskedNLL
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def maskedNLL(y_pred, y_gt, mask):
acc = torch.zeros_like(mask)
muX = y_pred[:,:,0]
muY = y_pred[:,:,1]
sigX = y_pred[:,:,2]
sigY = y_pred[:,:,3]
rho = y_pred[:,:,4]
ohr = torch.pow(1-torch.pow(rho,2),-0.5)
x = y_gt[:,:, 0]
y = y_gt[:,:, 1]
    # If we represent likelihood in feet^(-1); the constant 1.8379 is ln(2*pi),
    # the bivariate Gaussian normalization term:
    out = (0.5 * torch.pow(ohr, 2)
           * (torch.pow(sigX, 2) * torch.pow(x - muX, 2)
              + torch.pow(sigY, 2) * torch.pow(y - muY, 2)
              - 2 * rho * sigX * sigY * (x - muX) * (y - muY))
           - torch.log(sigX * sigY * ohr) + 1.8379)
    # If we represent likelihood in m^(-1), subtract a further 0.5160 from out.
acc[:,:,0] = out
acc[:,:,1] = out
acc = acc*mask
lossVal = torch.sum(acc)/torch.sum(mask)
return lossVal
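The function can be exercised with random tensors; the call below is a hypothetical example with shapes chosen purely for illustration: y_pred carries (muX, muY, sigX, sigY, rho) per time step and agent, y_gt the ground-truth coordinates, and mask marks the valid entries.

import torch

T, N = 5, 3                      # time steps, agents
y_pred = torch.rand(T, N, 5)     # muX, muY, sigX, sigY, rho
y_gt = torch.rand(T, N, 2)
mask = torch.ones(T, N, 2)       # all entries valid
print(maskedNLL(y_pred, y_gt, mask))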
Example 13: calc_gaes
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def calc_gaes(rewards, dones, v_preds, gamma, lam):
'''
Calculate GAE from Schulman et al. https://arxiv.org/pdf/1506.02438.pdf
v_preds are values predicted for current states, with one last element as the final next_state
delta is defined as r + gamma * V(s') - V(s) in eqn 10
GAE is defined in eqn 16
This method computes in torch tensor to prevent unnecessary moves between devices (e.g. GPU tensor to CPU numpy)
NOTE any standardization is done outside of this method
'''
T = len(rewards)
assert T + 1 == len(v_preds) # v_preds includes states and 1 last next_state
gaes = torch.zeros_like(rewards)
future_gae = torch.tensor(0.0, dtype=rewards.dtype)
# to multiply with not_dones to handle episode boundary (last state has no V(s'))
not_dones = 1 - dones
for t in reversed(range(T)):
delta = rewards[t] + gamma * v_preds[t + 1] * not_dones[t] - v_preds[t]
gaes[t] = future_gae = delta + gamma * lam * not_dones[t] * future_gae
return gaes
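A hypothetical call on a three-step rollout, with numbers made up for illustration; note that v_preds needs one extra bootstrap value for the final next_state, as the docstring requires:

import torch

rewards = torch.tensor([1.0, 0.0, 1.0])
dones = torch.tensor([0.0, 0.0, 1.0])          # episode ends at the last step
v_preds = torch.tensor([0.5, 0.4, 0.3, 0.2])   # T + 1 value estimates
gaes = calc_gaes(rewards, dones, v_preds, gamma=0.99, lam=0.95)
print(gaes)                                     # one advantage estimate per step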
Example 14: __init__
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def __init__(self, theta, initial_dist, dt, num_steps=10):
"""
Implements a SIR model where the number of sick has been replaced with the fraction of sick people of the entire
population. Model taken from this article: https://arxiv.org/pdf/2004.06680.pdf
:param theta: The parameters (beta, gamma, sigma)
"""
if initial_dist.event_shape != torch.Size([3]):
raise NotImplementedError('Must be of size 3!')
        # NOTE: the drift function `f` passed to super().__init__ below is defined
        # next to `g` in the original source but is not included in this excerpt.
        def g(x, beta, gamma, sigma):
g1 = -sigma * x[..., 0] * x[..., 1]
g3 = torch.zeros_like(g1)
return concater(g1, -g1, g3)
inc_dist = Independent(Normal(torch.zeros(1), math.sqrt(dt) * torch.ones(1)), 1)
super().__init__((f, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps)
Example 15: backward
# Required import: import torch [as alias]
# Or: from torch import zeros_like [as alias]
def backward(ctx, dldI_bxhxwxd, dldp_bxhxwx1):
tfims_bxhxwxd, tfimprob_bxhxwx1, \
tfimidxs_bxhxwx1, tfimweis_bxhxwx3, \
tfpoints2dmul_bxfx6, tfcolors_bxfx3d, \
tfprobface, tfprobcase, tfprobdis, tfprobdep, tfprobacc, \
debug_im = ctx.saved_tensors
dldp2 = torch.zeros_like(tfpoints2dmul_bxfx6)
dldp2_prob = torch.zeros_like(tfpoints2dmul_bxfx6)
dldc = torch.zeros_like(tfcolors_bxfx3d)
dr_cuda_batch.backward(dldI_bxhxwxd.contiguous(), \
dldp_bxhxwx1.contiguous(), \
tfims_bxhxwxd, tfimprob_bxhxwx1, \
tfimidxs_bxhxwx1, tfimweis_bxhxwx3, \
tfprobface, tfprobcase, tfprobdis, tfprobdep, tfprobacc, \
tfpoints2dmul_bxfx6, tfcolors_bxfx3d, \
dldp2, dldc, dldp2_prob, \
debug_im, multiplier, delta)
return None, dldp2 + dldp2_prob, None, dldc
###############################################################