This article collects typical usage examples of the torch.min method in Python. If you have been wondering what torch.min does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the torch module that this method belongs to.
The following shows 15 code examples of torch.min, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: iou_loss
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def iou_loss(pred, target, eps=1e-6):
    """IoU loss.

    Computing the IoU loss between a set of predicted bboxes and target bboxes.
    The loss is calculated as the negative log of the IoU.

    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        torch.Tensor: Loss tensor.
    """
    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
    loss = -ious.log()
    return loss
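A minimal usage sketch. It assumes bbox_overlaps is provided by mmdet (the import path below is an assumption about the surrounding codebase, not part of the snippet above):

import torch
from mmdet.core import bbox_overlaps  # assumed import path

pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
target = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
print(iou_loss(pred, target))  # per-box -log(IoU), shape (2,)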
Example 2: crop_images_random
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
Example 3: plot_images
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def plot_images(imgs, targets, paths=None, fname='images.jpg'):
    # Plots training images overlaid with targets
    imgs = imgs.cpu().numpy()
    targets = targets.cpu().numpy()
    # targets = targets[targets[:, 1] == 21]  # plot only one class

    fig = plt.figure(figsize=(10, 10))
    bs, _, h, w = imgs.shape  # batch size, _, height, width
    bs = min(bs, 16)  # limit plot to 16 images
    ns = np.ceil(bs ** 0.5)  # number of subplots
    for i in range(bs):
        boxes = xywh2xyxy(targets[targets[:, 0] == i, 2:6]).T
        boxes[[0, 2]] *= w
        boxes[[1, 3]] *= h
        plt.subplot(ns, ns, i + 1).imshow(imgs[i].transpose(1, 2, 0))
        plt.plot(boxes[[0, 2, 2, 0, 0]], boxes[[1, 1, 3, 3, 1]], '.-')
        plt.axis('off')
        if paths is not None:
            s = Path(paths[i]).name
            plt.title(s[:min(len(s), 40)], fontdict={'size': 8})  # limit to 40 characters
    fig.tight_layout()
    fig.savefig(fname, dpi=200)
    plt.close()
Example 4: plot_evolution_results
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    weights = (f - f.min()) ** 2  # for weighted results
    fig = plt.figure(figsize=(12, 10))
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 5]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})
        print('%15s: %.3g' % (k, mu))
    fig.tight_layout()
    plt.savefig('evolve.png', dpi=200)
Example 5: plot_results
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def plot_results(start=0, stop=0):  # from utils.utils import *; plot_results()
    # Plot training results files 'results*.txt'
    fig, ax = plt.subplots(2, 5, figsize=(14, 7))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP', 'F1']
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        for i in range(10):
            y = results[i, x]
            if i in [0, 1, 2, 5, 6, 7]:
                y[y == 0] = np.nan  # don't show zero loss values
            ax[i].plot(x, y, marker='.', label=f.replace('.txt', ''))
            ax[i].set_title(s[i])
            if i in [5, 6, 7]:  # share train and val loss y axes
                ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)
Example 6: plot_results_overlay
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training results files 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP', 'val', 'val', 'val', 'Recall', 'F1']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # don't show zero loss values
                ax[i].plot(x, y, marker='.', label=s[j])
            ax[i].set_title(t[i])
            ax[i].legend()
            if i == 0:
                ax[i].set_ylabel(f)  # add filename
        fig.tight_layout()
        fig.savefig(f.replace('.txt', '.png'), dpi=200)
Example 7: shem
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def shem(roi_probs_neg, negative_count, ohem_poolsize):
    """
    stochastic hard example mining: from a list of indices (referring to non-matched predictions),
    determine a pool of highest scoring (worst false positives) of size negative_count * ohem_poolsize.
    Then, sample n (= negative_count) predictions of this pool as negative examples for the loss.
    :param roi_probs_neg: tensor of shape (n_predictions, n_classes).
    :param negative_count: int.
    :param ohem_poolsize: int.
    :return: (negative_count). indices refer to the positions in roi_probs_neg. If the pool is smaller than expected
    due to limited negative proposals available, this function will return sampled indices of number < negative_count
    without throwing an error.
    """
    # sort according to highest foreground score.
    probs, order = roi_probs_neg[:, 1:].max(1)[0].sort(descending=True)
    select = torch.tensor((ohem_poolsize * int(negative_count), order.size()[0])).min().int()
    pool_indices = order[:select]
    rand_idx = torch.randperm(pool_indices.size()[0])
    return pool_indices[rand_idx[:negative_count].cuda()]
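A minimal usage sketch. It assumes a CUDA device is available and that the class scores already live on the GPU, as in the original training pipeline (the last line of shem moves the sampled indices to the GPU):

import torch

roi_probs_neg = torch.rand(100, 3).cuda()  # (n_predictions, n_classes) class scores
neg_idx = shem(roi_probs_neg, negative_count=10, ohem_poolsize=4)
print(neg_idx.shape)  # torch.Size([10]): indices into roi_probs_neg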
Example 8: iou
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def iou(source: Tensor, other: Tensor) -> Tensor:
    # Pairwise IoU between two batched sets of (x1, y1, x2, y2) boxes:
    # source (B, N, 4) vs. other (B, M, 4) -> IoU matrix of shape (B, N, M).
    source, other = source.unsqueeze(dim=-2).repeat(1, 1, other.shape[-2], 1), \
                    other.unsqueeze(dim=-3).repeat(1, source.shape[-2], 1, 1)

    source_area = (source[..., 2] - source[..., 0]) * (source[..., 3] - source[..., 1])
    other_area = (other[..., 2] - other[..., 0]) * (other[..., 3] - other[..., 1])

    intersection_left = torch.max(source[..., 0], other[..., 0])
    intersection_top = torch.max(source[..., 1], other[..., 1])
    intersection_right = torch.min(source[..., 2], other[..., 2])
    intersection_bottom = torch.min(source[..., 3], other[..., 3])
    intersection_width = torch.clamp(intersection_right - intersection_left, min=0)
    intersection_height = torch.clamp(intersection_bottom - intersection_top, min=0)
    intersection_area = intersection_width * intersection_height

    return intersection_area / (source_area + other_area - intersection_area)
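A quick shape check (the batch dimension is kept explicit so the repeat calls line up):

import torch

source = torch.tensor([[[0., 0., 10., 10.], [0., 0., 4., 4.]]])                         # (1, 2, 4)
other = torch.tensor([[[5., 5., 15., 15.], [0., 0., 10., 10.], [20., 20., 30., 30.]]])  # (1, 3, 4)
print(iou(source, other).shape)  # torch.Size([1, 2, 3])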
Example 9: intersect
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the intersection area of box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A,4].
      box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
      (tensor) intersection area, Shape: [A,B].
    """
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    return inter[:, :, 0] * inter[:, :, 1]
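A small worked example with hand-checkable numbers:

import torch

box_a = torch.tensor([[0., 0., 10., 10.], [2., 2., 6., 6.]])  # [A,4]
box_b = torch.tensor([[5., 5., 15., 15.]])                     # [B,4]
print(intersect(box_a, box_b))  # tensor([[25.], [1.]]): [A,B] intersection areas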
Example 10: update_critic
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def update_critic(self, obs, action, reward, next_obs, not_done, L, step):
    with torch.no_grad():
        _, policy_action, log_pi, _ = self.actor(next_obs)
        target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
        # clipped double-Q target: take the minimum of the two target critics
        target_V = torch.min(target_Q1, target_Q2) - self.alpha.detach() * log_pi
        target_Q = reward + (not_done * self.discount * target_V)

    # get current Q estimates
    current_Q1, current_Q2 = self.critic(obs, action)
    critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
    L.log('train_critic/loss', critic_loss, step)

    # Optimize the critic
    self.critic_optimizer.zero_grad()
    critic_loss.backward()
    self.critic_optimizer.step()

    self.critic.log(L, step)
Example 11: huber_loss
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def huber_loss(error, delta=1.0):
    """
    Args:
        error: Torch tensor (d1, d2, ..., dk)
    Returns:
        loss: Torch tensor (d1, d2, ..., dk)

    x = error = pred - gt or dist(pred, gt)
    0.5 * |x|^2                 if |x| <= d
    0.5 * d^2 + d * (|x| - d)   if |x| > d
    Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
    """
    abs_error = torch.abs(error)
    # quadratic = torch.min(abs_error, torch.FloatTensor([delta]))
    quadratic = torch.clamp(abs_error, max=delta)
    linear = (abs_error - quadratic)
    loss = 0.5 * quadratic ** 2 + delta * linear
    return loss
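A small worked example: errors inside the delta=1 band get the quadratic branch (0.5 * x^2), larger ones the linear branch:

import torch

error = torch.tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
print(huber_loss(error, delta=1.0))
# tensor([2.5000, 0.1250, 0.0000, 0.1250, 2.5000])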
Example 12: poly2bbox
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def poly2bbox(polys):
    """
    Convert quadrilaterals to axis-aligned bounding boxes (labels not included).
    :param polys: (x1, y1, ..., x4, y4) (n, 8)
    :return: boxes: (xmin, ymin, xmax, ymax) (n, 4)
    """
    n = polys.shape[0]
    xs = np.reshape(polys, (n, 4, 2))[:, :, 0]
    ys = np.reshape(polys, (n, 4, 2))[:, :, 1]

    xmin = np.min(xs, axis=1)
    ymin = np.min(ys, axis=1)
    xmax = np.max(xs, axis=1)
    ymax = np.max(ys, axis=1)

    xmin = xmin[:, np.newaxis]
    ymin = ymin[:, np.newaxis]
    xmax = xmax[:, np.newaxis]
    ymax = ymax[:, np.newaxis]
    return np.concatenate((xmin, ymin, xmax, ymax), 1)
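A small worked example (note this variant operates on NumPy arrays via np.min/np.max rather than torch.min):

import numpy as np

polys = np.array([[1, 2, 5, 2, 5, 8, 1, 8]])  # one quadrilateral as (x1, y1, ..., x4, y4)
print(poly2bbox(polys))  # [[1 2 5 8]] -> (xmin, ymin, xmax, ymax)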
Example 13: distance2bbox
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1] - 1)
        y1 = y1.clamp(min=0, max=max_shape[0] - 1)
        x2 = x2.clamp(min=0, max=max_shape[1] - 1)
        y2 = y2.clamp(min=0, max=max_shape[0] - 1)
    return torch.stack([x1, y1, x2, y2], -1)
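A small worked example: a point at (50, 40) with distances (10, 5, 20, 15) decodes to the box (40, 35, 70, 55), clamped to a 100x100 image:

import torch

points = torch.tensor([[50., 40.]])              # (x, y)
distance = torch.tensor([[10., 5., 20., 15.]])   # (left, top, right, bottom)
print(distance2bbox(points, distance, max_shape=(100, 100)))
# tensor([[40., 35., 70., 55.]])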
Example 14: boxes_to_masks
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def boxes_to_masks(boxes, h, w, padding=0.0):
    # Rasterize (x1, y1, x2, y2) boxes into n binary masks of size (h, w),
    # growing each box by 1 px plus an optional relative padding on every side.
    n = boxes.shape[0]
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    b_w = x2 - x1
    b_h = y2 - y1
    x1 = torch.clamp(x1 - 1 - b_w * padding, min=0)
    x2 = torch.clamp(x2 + 1 + b_w * padding, max=w)
    y1 = torch.clamp(y1 - 1 - b_h * padding, min=0)
    y2 = torch.clamp(y2 + 1 + b_h * padding, max=h)

    rows = torch.arange(w, device=boxes.device, dtype=x1.dtype).view(1, 1, -1).expand(n, h, w)
    cols = torch.arange(h, device=boxes.device, dtype=x1.dtype).view(1, -1, 1).expand(n, h, w)

    masks_left = rows >= x1.view(-1, 1, 1)
    masks_right = rows < x2.view(-1, 1, 1)
    masks_up = cols >= y1.view(-1, 1, 1)
    masks_down = cols < y2.view(-1, 1, 1)

    masks = masks_left * masks_right * masks_up * masks_down
    return masks
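A small worked example on a 6x8 canvas; the box (2, 1, 5, 4) is grown by 1 px on each side before rasterization, so 25 pixels end up inside:

import torch

boxes = torch.tensor([[2., 1., 5., 4.]])
masks = boxes_to_masks(boxes, h=6, w=8)
print(masks.shape, masks[0].sum())  # torch.Size([1, 6, 8]) tensor(25)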
Example 15: crop_by_box
# Required import: import torch [as alias]
# Or: from torch import min [as alias]
def crop_by_box(masks, box, padding=0.0):
    # Zero out mask values outside a single (x1, y1, x2, y2) box (grown by 1 px
    # plus an optional relative padding); also return the binary crop mask.
    n, h, w = masks.size()
    b_w = box[2] - box[0]
    b_h = box[3] - box[1]
    x1 = torch.clamp(box[0:1] - b_w * padding - 1, min=0)
    x2 = torch.clamp(box[2:3] + b_w * padding + 1, max=w - 1)
    y1 = torch.clamp(box[1:2] - b_h * padding - 1, min=0)
    y2 = torch.clamp(box[3:4] + b_h * padding + 1, max=h - 1)

    rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, 1, -1).expand(n, h, w)
    cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(n, h, w)

    masks_left = rows >= x1.expand(n, 1, 1)
    masks_right = rows < x2.expand(n, 1, 1)
    masks_up = cols >= y1.expand(n, 1, 1)
    masks_down = cols < y2.expand(n, 1, 1)

    crop_mask = masks_left * masks_right * masks_up * masks_down
    return masks * crop_mask.float(), crop_mask
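A small worked example: cropping an all-ones mask on a 6x8 canvas by the box (2, 1, 5, 4) keeps only the pixels inside the grown box:

import torch

masks = torch.ones(1, 6, 8)
box = torch.tensor([2., 1., 5., 4.])
cropped, crop_mask = crop_by_box(masks, box)
print(cropped.sum())  # tensor(25.): pixels outside the padded box are zeroed out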