This article collects typical usage examples of the torch.clamp method in Python. If you have been wondering what torch.clamp does and how to use it, the curated code examples below may help. You can also explore the other methods available in the torch module.
The following presents 15 code examples of torch.clamp, ordered by popularity by default.
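Before diving into the examples, here is a minimal, self-contained sketch of what torch.clamp does: it limits every element of a tensor to a range, and either bound may be omitted to clip on one side only.
import torch

x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(torch.clamp(x, min=-1.0, max=1.0))  # tensor([-1.0000, -0.5000,  0.0000,  0.5000,  1.0000])
print(torch.clamp(x, min=0.0))            # one-sided: only negative values are clipped
print(x.clamp(0, 1))                      # method form, equivalent to torch.clamp(x, 0, 1)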
Example 1: test
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def test(self, input, output):
'''
Return decoded output.
'''
input = Variable(input.cuda())
batch_size, _, _, H, W = input.size()
output = Variable(output.cuda())
gt = torch.cat([input, output], dim=1)
latent = self.encode(input, sample=False)
decoded_output, components = self.decode(latent, input.size(0))
decoded_output = decoded_output.view(*gt.size())
components = components.view(batch_size, self.n_frames_total, self.total_components,
self.n_channels, H, W)
latent['components'] = components
decoded_output = decoded_output.clamp(0, 1)
self.save_visuals(gt, decoded_output, components, latent)
return decoded_output.cpu(), latent
Example 2: enc_ans_features
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def enc_ans_features(self, x_type_bow, x_types, x_type_bow_len, x_path_bow, x_paths, x_path_bow_len, x_ctx_ents, x_ctx_ent_len, x_ctx_ent_num):
'''
x_types: answer type
x_paths: answer path, i.e., bow of relation
x_ctx_ents: answer context, i.e., bow of entity words, (batch_size, num_cands, num_ctx, L)
'''
# ans_types = torch.mean(self.ent_type_embed(x_types.view(-1, x_types.size(-1))), 1).view(x_types.size(0), x_types.size(1), -1)
ans_type_bow = (self.lstm_enc_type(x_type_bow.view(-1, x_type_bow.size(-1)), x_type_bow_len.view(-1))[1]).view(x_type_bow.size(0), x_type_bow.size(1), -1)
ans_path_bow = (self.lstm_enc_path(x_path_bow.view(-1, x_path_bow.size(-1)), x_path_bow_len.view(-1))[1]).view(x_path_bow.size(0), x_path_bow.size(1), -1)
ans_paths = torch.mean(self.relation_embed(x_paths.view(-1, x_paths.size(-1))), 1).view(x_paths.size(0), x_paths.size(1), -1)
# Avg over ctx
ctx_num_mask = create_mask(x_ctx_ent_num.view(-1), x_ctx_ents.size(2), self.use_cuda).view(x_ctx_ent_num.shape + (-1,))
ans_ctx_ent = (self.lstm_enc_ctx(x_ctx_ents.view(-1, x_ctx_ents.size(-1)), x_ctx_ent_len.view(-1))[1]).view(x_ctx_ents.size(0), x_ctx_ents.size(1), x_ctx_ents.size(2), -1)
ans_ctx_ent = ctx_num_mask.unsqueeze(-1) * ans_ctx_ent
ans_ctx_ent = torch.sum(ans_ctx_ent, dim=2) / torch.clamp(x_ctx_ent_num.float().unsqueeze(-1), min=VERY_SMALL_NUMBER)
if self.ans_enc_dropout:
# ans_types = F.dropout(ans_types, p=self.ans_enc_dropout, training=self.training)
ans_type_bow = F.dropout(ans_type_bow, p=self.ans_enc_dropout, training=self.training)
ans_path_bow = F.dropout(ans_path_bow, p=self.ans_enc_dropout, training=self.training)
ans_paths = F.dropout(ans_paths, p=self.ans_enc_dropout, training=self.training)
ans_ctx_ent = F.dropout(ans_ctx_ent, p=self.ans_enc_dropout, training=self.training)
return ans_type_bow, None, ans_path_bow, ans_paths, ans_ctx_ent
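The torch.clamp call in the averaging step above is a common guard against division by zero when a row may contain no context entities. A minimal sketch of the pattern, assuming VERY_SMALL_NUMBER is a tiny positive constant such as 1e-12 (the original constant is defined elsewhere in that project):
import torch

VERY_SMALL_NUMBER = 1e-12  # assumed value, for illustration only
summed = torch.tensor([[4.0], [0.0]])   # per-row sums
counts = torch.tensor([[2.0], [0.0]])   # per-row counts; a row may be empty
mean = summed / torch.clamp(counts, min=VERY_SMALL_NUMBER)
print(mean)  # tensor([[2.], [0.]]) -- empty rows yield 0 instead of NaN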
Example 3: iou
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def iou(source: Tensor, other: Tensor) -> Tensor:
source, other = source.unsqueeze(dim=-2).repeat(1, 1, other.shape[-2], 1), \
other.unsqueeze(dim=-3).repeat(1, source.shape[-2], 1, 1)
source_area = (source[..., 2] - source[..., 0]) * (source[..., 3] - source[..., 1])
other_area = (other[..., 2] - other[..., 0]) * (other[..., 3] - other[..., 1])
intersection_left = torch.max(source[..., 0], other[..., 0])
intersection_top = torch.max(source[..., 1], other[..., 1])
intersection_right = torch.min(source[..., 2], other[..., 2])
intersection_bottom = torch.min(source[..., 3], other[..., 3])
intersection_width = torch.clamp(intersection_right - intersection_left, min=0)
intersection_height = torch.clamp(intersection_bottom - intersection_top, min=0)
intersection_area = intersection_width * intersection_height
return intersection_area / (source_area + other_area - intersection_area)
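A toy usage sketch for the iou function above, assuming boxes are given as (x1, y1, x2, y2) with a leading batch dimension, which is what the repeat pattern implies:
import torch

source = torch.tensor([[[0., 0., 10., 10.], [5., 5., 15., 15.]]])  # shape [1, 2, 4]
other = torch.tensor([[[0., 0., 10., 10.]]])                       # shape [1, 1, 4]
print(iou(source, other))  # tensor([[[1.0000], [0.1429]]]): perfect overlap, then 25/175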
Example 4: forward
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Tensor of audio of dimension (..., time).
"""
if self.gain_type == "amplitude":
waveform = waveform * self.gain
if self.gain_type == "db":
waveform = F.gain(waveform, self.gain)
if self.gain_type == "power":
waveform = F.gain(waveform, 10 * math.log10(self.gain))
return torch.clamp(waveform, -1, 1)
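A standalone sketch of the final clamp step in isolation: after a gain is applied, samples are hard-limited back to the valid [-1, 1] audio range:
import torch

waveform = torch.sin(torch.linspace(0, 6.28, 8)) * 1.5  # gain pushes peaks past +/-1
print(torch.clamp(waveform, -1, 1))                     # peaks are hard-limited at the rails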
Example 5: cond_samples
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def cond_samples(f, replay_buffer, args, device, fresh=False):
sqrt = lambda x: int(t.sqrt(t.Tensor([x])))
plot = lambda p, x: tv.utils.save_image(t.clamp(x, -1, 1), p, normalize=True, nrow=sqrt(x.size(0)))
if fresh:
replay_buffer = uncond_samples(f, args, device, save=False)
n_it = replay_buffer.size(0) // 100
all_y = []
for i in range(n_it):
x = replay_buffer[i * 100: (i + 1) * 100].to(device)
y = f.classify(x).max(1)[1]
all_y.append(y)
all_y = t.cat(all_y, 0)
each_class = [replay_buffer[all_y == l] for l in range(10)]
print([len(c) for c in each_class])
for i in range(100):
this_im = []
for l in range(10):
this_l = each_class[l][i * 10: (i + 1) * 10]
this_im.append(this_l)
this_im = t.cat(this_im, 0)
if this_im.size(0) > 0:
plot('{}/samples_{}.png'.format(args.save_dir, i), this_im)
print(i)
Example 6: intersect
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
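A toy usage sketch for intersect, with boxes in (x1, y1, x2, y2) form; the clamp(min=0) is what zeroes out pairs that do not overlap:
import torch

box_a = torch.tensor([[0., 0., 10., 10.]])                        # [A=1, 4]
box_b = torch.tensor([[5., 5., 15., 15.], [20., 20., 30., 30.]])  # [B=2, 4]
print(intersect(box_a, box_b))  # tensor([[25., 0.]])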
Example 7: calc_sil_policy_val_loss
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def calc_sil_policy_val_loss(self, batch, pdparams):
'''
Calculate the SIL policy losses for actor and critic
sil_policy_loss = -log_prob * max(R - v_pred, 0)
sil_val_loss = (max(R - v_pred, 0)^2) / 2
This is called on a randomly sampled batch from experience replay
'''
v_preds = self.calc_v(batch['states'], use_cache=False)
rets = math_util.calc_returns(batch['rewards'], batch['dones'], self.gamma)
clipped_advs = torch.clamp(rets - v_preds, min=0.0)
action_pd = policy_util.init_action_pd(self.body.ActionPD, pdparams)
actions = batch['actions']
if self.body.env.is_venv:
actions = math_util.venv_unpack(actions)
log_probs = action_pd.log_prob(actions)
sil_policy_loss = - self.sil_policy_loss_coef * (log_probs * clipped_advs).mean()
sil_val_loss = self.sil_val_loss_coef * clipped_advs.pow(2).mean() / 2
logger.debug(f'SIL actor policy loss: {sil_policy_loss:g}')
logger.debug(f'SIL critic value loss: {sil_val_loss:g}')
return sil_policy_loss, sil_val_loss
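The clamp with min=0.0 above is the self-imitation gate: only transitions whose return beats the current value estimate contribute to the loss. A minimal numeric sketch:
import torch

rets = torch.tensor([1.0, 3.0, 0.5])
v_preds = torch.tensor([2.0, 1.0, 0.5])
print(torch.clamp(rets - v_preds, min=0.0))  # tensor([0., 2., 0.])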
Example 8: huber_loss
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def huber_loss(error, delta=1.0):
"""
Args:
error: Torch tensor (d1,d2,...,dk)
Returns:
loss: Torch tensor (d1,d2,...,dk)
x = error = pred - gt or dist(pred,gt)
0.5 * |x|^2 if |x|<=d
0.5 * d^2 + d * (|x|-d) if |x|>d
Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
"""
abs_error = torch.abs(error)
#quadratic = torch.min(abs_error, torch.FloatTensor([delta]))
quadratic = torch.clamp(abs_error, max=delta)
linear = (abs_error - quadratic)
loss = 0.5 * quadratic**2 + delta * linear
return loss
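A quick numeric check of how the clamp realizes the quadratic/linear split, with loss values worked out by hand from the piecewise formula in the docstring:
import torch

error = torch.tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
print(huber_loss(error, delta=1.0))
# tensor([2.5000, 0.1250, 0.0000, 0.1250, 2.5000])
# |x| <= 1 gives 0.5*x^2; |x| > 1 gives 0.5*1^2 + 1*(|x| - 1)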
Example 9: tforward
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def tforward(self, disp, edge=None):
self.sobel = self.sobel.to(disp.device)
if edge is not None:
grad = self.sobel(disp)
grad = torch.sqrt(grad[:,0:1,...]**2 + grad[:,1:2,...]**2 + 1e-8)
pdf = (1-edge)/self.b0 * torch.exp(-torch.abs(grad)/self.b0) + \
edge/self.b1 * torch.exp(-torch.abs(grad)/self.b1)
val = torch.mean(-torch.log(pdf.clamp(min=1e-4)))
else:
# on qifeng's data we don't have ambient info
# therefore we suppress edge everywhere
grad = self.sobel(disp)
grad = torch.sqrt(grad[:,0:1,...]**2 + grad[:,1:2,...]**2 + 1e-8)
grad = torch.clamp(grad, 0, 1.0)
val = torch.mean(grad)
return val
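The clamp(min=1e-4) before the log above is a standard numerical floor: without it, a zero-probability pixel would produce -inf and poison the mean. A minimal sketch:
import torch

pdf = torch.tensor([0.5, 1e-9, 0.0])
print(-torch.log(pdf.clamp(min=1e-4)))  # tensor([0.6931, 9.2103, 9.2103]) -- finite everywhere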
Example 10: fwd
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def fwd(self, depth0, depth1, R0, t0, R1, t1):
uv1, d1 = super().tforward(depth0, R0, t0, R1, t1)
uv1[..., 0] = 2 * (uv1[..., 0] / (self.im_width-1) - 0.5)
uv1[..., 1] = 2 * (uv1[..., 1] / (self.im_height-1) - 0.5)
uv1 = uv1.view(-1, self.im_height, self.im_width, 2).clone()
depth10 = torch.nn.functional.grid_sample(depth1, uv1, padding_mode='border')
diff = torch.abs(d1.view(-1) - depth10.view(-1))
if self.clamp > 0:
diff = torch.clamp(diff, 0, self.clamp)
# return diff without clamping for debugging
return diff.mean()
Example 11: boxes_to_masks
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def boxes_to_masks(boxes, h, w, padding=0.0):
n = boxes.shape[0]
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
b_w = x2 - x1
b_h = y2 - y1
x1 = torch.clamp(x1 - 1 - b_w * padding, min=0)
x2 = torch.clamp(x2 + 1 + b_w * padding, max=w)
y1 = torch.clamp(y1 - 1 - b_h * padding, min=0)
y2 = torch.clamp(y2 + 1 + b_h * padding, max=h)
rows = torch.arange(w, device=boxes.device, dtype=x1.dtype).view(1, 1, -1).expand(n, h, w)
cols = torch.arange(h, device=boxes.device, dtype=x1.dtype).view(1, -1, 1).expand(n, h, w)
masks_left = rows >= x1.view(-1, 1, 1)
masks_right = rows < x2.view(-1, 1, 1)
masks_up = cols >= y1.view(-1, 1, 1)
masks_down = cols < y2.view(-1, 1, 1)
masks = masks_left * masks_right * masks_up * masks_down
return masks
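A toy usage sketch for boxes_to_masks; note the function also expands the box by one pixel on each side before rasterizing it into a binary mask:
import torch

boxes = torch.tensor([[2., 1., 5., 4.]])  # (x1, y1, x2, y2)
masks = boxes_to_masks(boxes, h=6, w=8)
print(masks.shape)      # torch.Size([1, 6, 8])
print(masks[0].long())  # a rectangle of ones covering the expanded box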
Example 12: crop_by_box
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def crop_by_box(masks, box, padding=0.0):
n, h, w = masks.size()
b_w = box[2] - box[0]
b_h = box[3] - box[1]
x1 = torch.clamp(box[0:1] - b_w * padding - 1, min=0)
x2 = torch.clamp(box[2:3] + b_w * padding + 1, max=w - 1)
y1 = torch.clamp(box[1:2] - b_h * padding - 1, min=0)
y2 = torch.clamp(box[3:4] + b_h * padding + 1, max=h - 1)
rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, 1, -1).expand(n, h, w)
cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(n, h, w)
masks_left = rows >= x1.expand(n, 1, 1)
masks_right = rows < x2.expand(n, 1, 1)
masks_up = cols >= y1.expand(n, 1, 1)
masks_down = cols < y2.expand(n, 1, 1)
crop_mask = masks_left * masks_right * masks_up * masks_down
return masks * crop_mask.float(), crop_mask
Example 13: _split_and_clip
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def _split_and_clip(boxes, scores, index, valid_size):
boxes_out, scores_out = [], []
for img_id, valid_size_i in enumerate(valid_size):
idx = index == img_id
if idx.any().item():
boxes_i = boxes[idx]
boxes_i[:, :, [0, 2]] = torch.clamp(boxes_i[:, :, [0, 2]], min=0, max=valid_size_i[0])
boxes_i[:, :, [1, 3]] = torch.clamp(boxes_i[:, :, [1, 3]], min=0, max=valid_size_i[1])
boxes_out.append(boxes_i)
scores_out.append(scores[idx])
else:
boxes_out.append(None)
scores_out.append(None)
return boxes_out, scores_out
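The two clamp calls above perform per-image boundary clipping: one coordinate pair is limited to the image's valid extent along each axis. A standalone sketch, treating valid_size_i as (width, height) for illustration only; the actual convention depends on the caller:
import torch

boxes_i = torch.tensor([[[-5., -5., 120., 90.]]])  # [N, K, 4], (x1, y1, x2, y2)
valid_size_i = (100, 80)                           # assumed (width, height) convention
boxes_i[:, :, [0, 2]] = torch.clamp(boxes_i[:, :, [0, 2]], min=0, max=valid_size_i[0])
boxes_i[:, :, [1, 3]] = torch.clamp(boxes_i[:, :, [1, 3]], min=0, max=valid_size_i[1])
print(boxes_i)  # tensor([[[  0.,   0., 100.,  80.]]])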
Example 14: intersection_area
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def intersection_area(yx_min1, yx_max1, yx_min2, yx_max2):
"""
Calculates the intersection area of two lists of bounding boxes.
:author 申瑞瑉 (Ruimin Shen)
:param yx_min1: The top left coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
:param yx_max1: The bottom right coordinates (y, x) of the first list (size [N1, 2]) of bounding boxes.
:param yx_min2: The top left coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
:param yx_max2: The bottom right coordinates (y, x) of the second list (size [N2, 2]) of bounding boxes.
:return: The matrix (size [N1, N2]) of the intersection area.
"""
ymin1, xmin1 = torch.split(yx_min1, 1, -1)
ymax1, xmax1 = torch.split(yx_max1, 1, -1)
ymin2, xmin2 = torch.split(yx_min2, 1, -1)
ymax2, xmax2 = torch.split(yx_max2, 1, -1)
max_ymin = torch.max(ymin1.repeat(1, ymin2.size(0)), torch.transpose(ymin2, 0, 1).repeat(ymin1.size(0), 1)) # PyTorch's bug
min_ymax = torch.min(ymax1.repeat(1, ymax2.size(0)), torch.transpose(ymax2, 0, 1).repeat(ymax1.size(0), 1)) # PyTorch's bug
height = torch.clamp(min_ymax - max_ymin, min=0)
max_xmin = torch.max(xmin1.repeat(1, xmin2.size(0)), torch.transpose(xmin2, 0, 1).repeat(xmin1.size(0), 1)) # PyTorch's bug
min_xmax = torch.min(xmax1.repeat(1, xmax2.size(0)), torch.transpose(xmax2, 0, 1).repeat(xmax1.size(0), 1)) # PyTorch's bug
width = torch.clamp(min_xmax - max_xmin, min=0)
return height * width
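A toy usage sketch for intersection_area, with coordinates in (y, x) order as the docstring specifies:
import torch

yx_min1 = torch.tensor([[0., 0.]])
yx_max1 = torch.tensor([[10., 10.]])
yx_min2 = torch.tensor([[5., 5.], [20., 20.]])
yx_max2 = torch.tensor([[15., 15.], [30., 30.]])
print(intersection_area(yx_min1, yx_max1, yx_min2, yx_max2))  # tensor([[25., 0.]])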
Example 15: batch_intersection_area
# Required import: import torch [as alias]
# Or: from torch import clamp [as alias]
def batch_intersection_area(yx_min1, yx_max1, yx_min2, yx_max2):
"""
Calculates the intersection area of two lists of bounding boxes for N independent batches.
:author 申瑞瑉 (Ruimin Shen)
:param yx_min1: The top left coordinates (y, x) of the first lists (size [N, N1, 2]) of bounding boxes.
:param yx_max1: The bottom right coordinates (y, x) of the first lists (size [N, N1, 2]) of bounding boxes.
:param yx_min2: The top left coordinates (y, x) of the second lists (size [N, N2, 2]) of bounding boxes.
:param yx_max2: The bottom right coordinates (y, x) of the second lists (size [N, N2, 2]) of bounding boxes.
:return: The matrices (size [N, N1, N2]) of the intersection area.
"""
ymin1, xmin1 = torch.split(yx_min1, 1, -1)
ymax1, xmax1 = torch.split(yx_max1, 1, -1)
ymin2, xmin2 = torch.split(yx_min2, 1, -1)
ymax2, xmax2 = torch.split(yx_max2, 1, -1)
max_ymin = torch.max(ymin1.repeat(1, 1, ymin2.size(1)), torch.transpose(ymin2, 1, 2).repeat(1, ymin1.size(1), 1)) # PyTorch's bug
min_ymax = torch.min(ymax1.repeat(1, 1, ymax2.size(1)), torch.transpose(ymax2, 1, 2).repeat(1, ymax1.size(1), 1)) # PyTorch's bug
height = torch.clamp(min_ymax - max_ymin, min=0)
max_xmin = torch.max(xmin1.repeat(1, 1, xmin2.size(1)), torch.transpose(xmin2, 1, 2).repeat(1, xmin1.size(1), 1)) # PyTorch's bug
min_xmax = torch.min(xmax1.repeat(1, 1, xmax2.size(1)), torch.transpose(xmax2, 1, 2).repeat(1, xmax1.size(1), 1)) # PyTorch's bug
width = torch.clamp(min_xmax - max_xmin, min=0)
return height * width