This article collects typical usage examples of Python's torch.min function. If you have been wondering what torch.min does, how to call it, or what real-world uses look like, the curated examples here should help.
Fifteen code examples of torch.min are listed below, sorted by popularity by default.
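As a quick orientation before the examples, torch.min covers three call forms: a full reduction, a reduction along a dimension (returning values and indices), and an element-wise minimum of two tensors. A minimal sketch, not taken from any of the projects below:

import torch

x = torch.tensor([[1.0, 4.0], [3.0, 2.0]])
y = torch.tensor([[2.0, 0.5], [5.0, 1.0]])

torch.min(x)         # global minimum: tensor(1.)
torch.min(x, dim=1)  # per-row minima: values tensor([1., 2.]), indices tensor([0, 1])
torch.min(x, y)      # element-wise minimum: tensor([[1.0000, 0.5000], [3.0000, 1.0000]])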
Example 1: bbox_ious
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    if x1y1x2y2:
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:
        mx = torch.min(boxes1[0] - boxes1[2] / 2.0, boxes2[0] - boxes2[2] / 2.0)
        Mx = torch.max(boxes1[0] + boxes1[2] / 2.0, boxes2[0] + boxes2[2] / 2.0)
        my = torch.min(boxes1[1] - boxes1[3] / 2.0, boxes2[1] - boxes2[3] / 2.0)
        My = torch.max(boxes1[1] + boxes1[3] / 2.0, boxes2[1] + boxes2[3] / 2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    mask = ((cw <= 0) + (ch <= 0) > 0)
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea / uarea
Example 2: bbox_iou
def bbox_iou(box1, box2):
    """
    Returns the IoU of two bounding boxes
    """
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # Get the coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)
    # Union area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    return iou
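A minimal way to exercise this function, with made-up corner-format boxes (values chosen only for illustration):

box1 = torch.tensor([[0.0, 0.0, 9.0, 9.0]])     # one 10x10 box (inclusive corners)
box2 = torch.tensor([[5.0, 5.0, 14.0, 14.0]])   # overlapping 10x10 box
print(bbox_iou(box1, box2))                     # tensor([0.1429]): 25 / (100 + 100 - 25)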
Example 3: bbox_iou
def bbox_iou(box1, box2, x1y1x2y2=True):
    """
    Returns the IoU of two bounding boxes
    """
    if not x1y1x2y2:
        # Transform from center and width to exact coordinates
        b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
        b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
        b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
    else:
        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # Get the coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
        inter_rect_y2 - inter_rect_y1 + 1, min=0
    )
    # Union area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
    return iou
Example 4: bbox_overlaps
def bbox_overlaps(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray or tensor or variable
    query_boxes: (K, 4) ndarray or tensor or variable
    Returns
    -------
    overlaps: (N, K) overlap between boxes and query_boxes
    """
    if isinstance(boxes, np.ndarray):
        boxes = torch.from_numpy(boxes)
        query_boxes = torch.from_numpy(query_boxes)
        out_fn = lambda x: x.numpy()  # if the input is an ndarray, convert the overlaps back to an ndarray on return
    else:
        out_fn = lambda x: x
    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * \
                (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
                  (query_boxes[:, 3] - query_boxes[:, 1] + 1)
    iw = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) - torch.max(
        boxes[:, 0:1], query_boxes[:, 0:1].t()) + 1).clamp(min=0)
    ih = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) - torch.max(
        boxes[:, 1:2], query_boxes[:, 1:2].t()) + 1).clamp(min=0)
    ua = box_areas.view(-1, 1) + query_areas.view(1, -1) - iw * ih
    overlaps = iw * ih / ua
    return out_fn(overlaps)
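A small sanity check for the N-by-K behaviour, with hypothetical ndarray inputs (numpy imported as np, as in the snippet):

boxes = np.array([[0., 0., 9., 9.],
                  [10., 10., 19., 19.]], dtype=np.float32)   # N = 2
query = np.array([[5., 5., 14., 14.]], dtype=np.float32)     # K = 1
overlaps = bbox_overlaps(boxes, query)   # ndarray of shape (2, 1), each entry about 0.143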
Example 5: updateOutput
def updateOutput(self, input):
    self._lazyInit()
    dimension = self._getPositiveDimension(input)
    torch.min(input, dimension, out=(self._output, self._indices), keepdim=True)
    if input.dim() > 1:
        self.output.set_(self._output.select(dimension, 0))
    else:
        self.output.set_(self._output)
    return self.output
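This method appears to come from a legacy nn-style Min module: it pre-allocates output and index buffers and hands them to torch.min via out=. In everyday code the same dimension reduction is usually written without buffers, for example:

values, indices = torch.min(torch.tensor([[3.0, 1.0], [2.0, 5.0]]), dim=0, keepdim=True)
# values:  tensor([[2., 1.]])
# indices: tensor([[1, 0]])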
Example 6: bbox_transform
def bbox_transform(self, boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0), clip_value=4.135166556742356):
    """Forward transform that maps proposal boxes to predicted ground-truth
    boxes using bounding-box regression deltas. See bbox_transform_inv for a
    description of the weights argument.
    """
    if boxes.size(0) == 0:
        return None
        # return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
    # get box dimensions and centers
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights
    wx, wy, ww, wh = weights
    dx = deltas[:, 0::4] / wx
    dy = deltas[:, 1::4] / wy
    dw = deltas[:, 2::4] / ww
    dh = deltas[:, 3::4] / wh
    clip_value = Variable(torch.FloatTensor([clip_value]))
    if boxes.is_cuda:
        clip_value = clip_value.cuda()
    # Prevent sending too large values into torch.exp()
    dw = torch.min(dw, clip_value)
    dh = torch.min(dh, clip_value)
    pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
    pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
    pred_w = torch.exp(dw) * widths.unsqueeze(1)
    pred_h = torch.exp(dh) * heights.unsqueeze(1)
    # pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    # x1
    pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w
    # y1
    pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h
    # x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
    pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w - 1
    # y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
    pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h - 1
    pred_boxes = torch.cat((pred_boxes_x1,
                            pred_boxes_y1,
                            pred_boxes_x2,
                            pred_boxes_y2), 1)
    return pred_boxes
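Here torch.min(dw, clip_value) caps the predicted log-scale deltas before they are exponentiated, so torch.exp cannot overflow. A standalone sketch of the same capping, with a hypothetical delta tensor:

import torch

dw = torch.tensor([1.5, 7.2, 3.0])
limit = torch.tensor([4.135166556742356])
capped = torch.min(dw, limit)   # tensor([1.5000, 4.1352, 3.0000])
# torch.clamp(dw, max=4.135166556742356) would give the same result here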
Example 7: forward
def forward(self, v, u, d):
    """
    @param v [batch_size, embedding_size] matrix to push
    @param u [batch_size,] vector of pop signals in (0, 1)
    @param d [batch_size,] vector of push signals in (0, 1)
    @return [batch_size, embedding_size] or [batch_size, self.k, embedding_size] read matrix
    """
    # update V, which is of size [t, batch_size, embedding_size]
    v = v.view(1, self.batch_size, self.embedding_size)
    self.V = torch.cat([self.V, v], 0) if len(self.V.data) != 0 else v
    # TODO append to self.s so we can terminate lower loop early?
    # TODO initialize stack to fixed size
    # update s, which is of size [t, batch_size]
    old_t = self.s.data.shape[0] if self.s.data.shape else 0
    s = Variable(torch.FloatTensor(old_t + 1, self.batch_size))
    w = u
    for i in reversed(xrange(old_t)):
        s_ = F.relu(self.s[i, :] - w)
        w = F.relu(w - self.s[i, :])
        s[i, :] = s_
    s[old_t, :] = d
    self.s = s
    if self.k is None:
        # calculate r, which is of size [batch_size, embedding_size]
        r = Variable(torch.zeros([self.batch_size, self.embedding_size]))
        for i in reversed(xrange(old_t + 1)):
            used = torch.sum(self.s[i + 1:old_t + 1, :], 0) if i < old_t else self.zero
            coeffs = torch.min(self.s[i, :], F.relu(1 - used))
            # reshape coeffs into a matrix that can be multiplied element-wise
            r += coeffs.view(self.batch_size, 1).repeat(1, self.embedding_size) * self.V[i, :, :]
        return r
    else:  # calculate k read vectors
        # TODO can probably make this more efficient
        r = Variable(torch.zeros([self.batch_size, self.k, self.embedding_size]))
        for k in xrange(self.k):
            for i in reversed(xrange(old_t + 1)):
                used = torch.sum(self.s[i + 1:old_t + 1, :], 0) if i < old_t else self.zero
                coeffs = torch.min(self.s[i, :], F.relu(1 + k - used))
                r[:, k, :] = r[:, k, :] + coeffs.view(self.batch_size, 1).repeat(1, self.embedding_size) * self.V[i, :, :]
        for k in reversed(xrange(1, self.k)):
            r[:, k, :] = r[:, k, :] - r[:, k - 1, :]
        return r
Example 8: box_iou
def box_iou(box1, box2):
    """
    Compute the IoU between two sets of boxes, where box1 is default_box_xyxy
    (in xyxy format) and box2 holds the ground-truth bounding boxes.
    :param box1: default_boxes, [#default_boxes, 4]
    :param box2: bounding_boxes, [#bounding_boxes, 4]
    :return:
        iou, sized [#default_boxes, #bounding_boxes]
    """
    # print('box1.size():{}'.format(box1.size()))
    # print('box2.size():{}'.format(box2.size()))
    lt = torch.max(box1[:, None, :2], box2[:, :2])  # [#default_boxes, #bounding_boxes, 2]
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # [#default_boxes, #bounding_boxes, 2]
    # print('lt:{}'.format(lt))
    # print('rb:{}'.format(rb))
    wh = (rb - lt).clamp(min=0)  # [#default_boxes, #bounding_boxes, 2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [#default_boxes, #bounding_boxes]
    # print('inter:{}'.format(inter))
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])  # [#default_boxes]
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])  # [#bounding_boxes]
    # print('area1:{}'.format(area1))
    # print('area2:{}'.format(area2))
    iou = inter / (area1[:, None] + area2 - inter)
    # print('iou:{}'.format(iou))
    return iou
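The [:, None, ...] indexing broadcasts every default box against every ground-truth box, so the result is a full IoU matrix. A quick check with made-up boxes:

default_boxes = torch.tensor([[0., 0., 10., 10.],
                              [5., 5., 15., 15.]])      # [2, 4]
gt_boxes = torch.tensor([[0., 0., 10., 10.],
                         [20., 20., 30., 30.],
                         [5., 0., 15., 10.]])           # [3, 4]
iou = box_iou(default_boxes, gt_boxes)                  # shape [2, 3]
# iou[0, 0] == 1.0 (identical boxes), iou[0, 1] == 0.0 (disjoint boxes)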
Example 9: forward
def forward(self, v, u, d):
    """
    @param v [batch_size, embedding_size] matrix to push
    @param u [batch_size,] vector of pop signals in (0, 1)
    @param d [batch_size,] vector of push signals in (0, 1)
    @return [batch_size, embedding_size] read matrix
    """
    # update V, which is of size [t, batch_size, embedding_size]
    v = v.view(1, self.batch_size, self.embedding_size)
    self.V = torch.cat([self.V, v], 0) if len(self.V.data) != 0 else v
    # TODO initialize queue to fixed size
    # update s, which is of size [t, batch_size]
    old_t = self.s.size(0) if self.s.size() else 0
    s = Variable(torch.FloatTensor(old_t + 1, self.batch_size))
    w = u
    for i in xrange(old_t):
        s_ = F.relu(self.s[i, :] - w)
        w = F.relu(w - self.s[i, :])
        s[i, :] = s_
        # if len(torch.nonzero(w.data)) == 0: break
        # TODO does this if work properly now?
    s[old_t, :] = d
    self.s = s
    # calculate r, which is of size [batch_size, embedding_size]
    r = Variable(torch.zeros([self.batch_size, self.embedding_size]))
    for i in xrange(old_t + 1):
        used = torch.sum(self.s[:i, :], 0) if i > 0 else self.zero
        coeffs = torch.min(self.s[i, :], F.relu(1 - used))
        # reshape coeffs into a matrix that can be multiplied element-wise
        r += coeffs.view(self.batch_size, 1).repeat(1, self.embedding_size) * self.V[i, :, :]
    return r
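In both the stack read (Example 7) and the queue read above (and again in Example 11), torch.min(strength_i, relu(1 - used)) limits how much each slot contributes, so the strengths actually read never sum to more than 1. A standalone sketch of that rule with made-up strengths, walked in read order:

import torch
import torch.nn.functional as F

strengths = torch.tensor([0.6, 0.5, 0.3])    # hypothetical per-slot strengths, in read order
used = torch.tensor(0.0)
coeffs = []
for s in strengths:
    c = torch.min(s, F.relu(1 - used))        # take only what is left of the unit read budget
    coeffs.append(c)
    used = used + s
# coeffs is approximately [0.6, 0.4, 0.0]; the read coefficients never sum past 1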
Example 10: generate_smooth_grad
def generate_smooth_grad(Backprop, prep_img, target_class, param_n, param_sigma_multiplier):
    """
    Generates smooth gradients for the given Backprop type. You can use this with both vanilla
    and guided backprop.
    Args:
        Backprop (class): Backprop type
        prep_img (torch Variable): preprocessed image
        target_class (int): target class of ImageNet
        param_n (int): number of images used to smooth the gradient
        param_sigma_multiplier (int): sigma multiplier when calculating the std of the noise
    """
    # Generate an empty image/matrix
    smooth_grad = np.zeros(prep_img.size()[1:])
    mean = 0
    sigma = param_sigma_multiplier / (torch.max(prep_img) - torch.min(prep_img)).data[0]
    for x in range(param_n):
        # Generate noise
        noise = Variable(prep_img.data.new(prep_img.size()).normal_(mean, sigma**2))
        # Add noise to the image
        noisy_img = prep_img + noise
        # Calculate gradients
        vanilla_grads = Backprop.generate_gradients(noisy_img, target_class)
        # Add gradients to smooth_grad
        smooth_grad = smooth_grad + vanilla_grads
    # Average it out
    smooth_grad = smooth_grad / param_n
    return smooth_grad
Example 11: read
def read(self, strength):
    """
    The read operation looks at the first few items on the stack, in
    the order determined by self._read_indices, such that the total
    strength of these items is equal to the value of the strength
    parameter. If necessary, the strength of the last vector is
    reduced so that the total strength of the items read is exactly
    equal to the strength parameter. The output of the read
    operation is computed by taking the sum of all the vectors
    looked at, weighted by their strengths.
    :type strength: float
    :param strength: The total amount of vectors to look at,
        measured by their strengths
    :rtype: Variable
    :return: The output of the read operation, described above
    """
    r = Variable(torch.zeros([self.batch_size, self.embedding_size]))
    str_used = Variable(torch.zeros(self.batch_size))
    for i in self._read_indices():
        str_i = self.strengths[i, :]
        str_weights = torch.min(str_i, relu(1 - str_used))
        str_weights = str_weights.view(self.batch_size, 1)
        str_weights = str_weights.repeat(1, self.embedding_size)
        r += str_weights * self.contents[i, :, :]
        str_used = str_used + str_i
    return r
Example 12: box_iou
def box_iou(box1, box2, order='xyxy'):
    '''Compute the intersection over union of two sets of boxes.
    The default box order is (xmin, ymin, xmax, ymax).
    Args:
        box1: (tensor) bounding boxes, sized [N,4].
        box2: (tensor) bounding boxes, sized [M,4].
        order: (str) box order, either 'xyxy' or 'xywh'.
    Return:
        (tensor) iou, sized [N,M].
    Reference:
        https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
    '''
    if order == 'xywh':
        box1 = change_box_order(box1, 'xywh2xyxy')
        box2 = change_box_order(box2, 'xywh2xyxy')
    N = box1.size(0)
    M = box2.size(0)
    lt = torch.max(box1[:, None, :2], box2[:, :2])  # [N,M,2]
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # [N,M,2]
    wh = (rb - lt + 1).clamp(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
    area1 = (box1[:, 2] - box1[:, 0] + 1) * (box1[:, 3] - box1[:, 1] + 1)  # [N,]
    area2 = (box2[:, 2] - box2[:, 0] + 1) * (box2[:, 3] - box2[:, 1] + 1)  # [M,]
    iou = inter / (area1[:, None] + area2 - inter)
    return iou
Example 13: shortest_dist
def shortest_dist(dist_mat):
    """Parallel version.
    Args:
        dist_mat: pytorch Variable, available shape:
            1) [m, n]
            2) [m, n, N], N is batch size
            3) [m, n, *], * can be arbitrary additional dimensions
    Returns:
        dist: three cases corresponding to `dist_mat`:
            1) scalar
            2) pytorch Variable, with shape [N]
            3) pytorch Variable, with shape [*]
    """
    m, n = dist_mat.size()[:2]
    # Just offering some reference for accessing intermediate distance.
    dist = [[0 for _ in range(n)] for _ in range(m)]
    for i in range(m):
        for j in range(n):
            if (i == 0) and (j == 0):
                dist[i][j] = dist_mat[i, j]
            elif (i == 0) and (j > 0):
                dist[i][j] = dist[i][j - 1] + dist_mat[i, j]
            elif (i > 0) and (j == 0):
                dist[i][j] = dist[i - 1][j] + dist_mat[i, j]
            else:
                dist[i][j] = torch.min(dist[i - 1][j], dist[i][j - 1]) + dist_mat[i, j]
    dist = dist[-1][-1]
    return dist
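A quick check of this dynamic program, with a hypothetical 2x2 distance matrix (case 1 of the docstring, so the result is a 0-dim tensor):

dist_mat = torch.tensor([[1., 2.],
                         [4., 3.]])
print(shortest_dist(dist_mat))   # tensor(6.): path 1 -> 2 -> 3 beats 1 -> 4 -> 3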
Example 14: bisect_demo
def bisect_demo():
    """ Bisect the LB/UB on specified columns.
    The key is to use scatter_() to convert indices into one-hot encodings.
    """
    t1t2 = torch.stack((torch.randn(5, 4), torch.randn(5, 4)), dim=-1)
    lb, _ = torch.min(t1t2, dim=-1)
    ub, _ = torch.max(t1t2, dim=-1)
    print('LB:', lb)
    print('UB:', ub)
    # random idxs for testing
    idxs = torch.randn_like(lb)
    _, idxs = idxs.max(dim=-1)  # <Batch>
    print('Split idxs:', idxs)
    idxs = idxs.unsqueeze(dim=-1)  # Batch x 1
    idxs = torch.zeros_like(lb).byte().scatter_(-1, idxs, 1)  # convert into one-hot encoding
    print('Reorg idxs:', idxs)
    mid = (lb + ub) / 2.0
    lefts_lb = lb
    lefts_ub = torch.where(idxs, mid, ub)   # use the one-hot encoding to call torch.where()
    rights_lb = torch.where(idxs, mid, lb)  # definitely faster than element-wise reassignment
    rights_ub = ub
    print('LEFT LB:', lefts_lb)
    print('LEFT UB:', lefts_ub)
    print('RIGHT LB:', rights_lb)
    print('RIGHT UB:', rights_ub)
    newlb = torch.cat((lefts_lb, rights_lb), dim=0)
    newub = torch.cat((lefts_ub, rights_ub), dim=0)
    return newlb, newub
Example 15: forward
def forward(self, y_pred, y_true, eps=1e-6):
    return NotImplementedError
    # NOTE: the early return above (kept from the original source) disables the body below,
    # which computes a negated soft span-level F1 between predicted and true index pairs.
    torch.nn.modules.loss._assert_no_grad(y_true)
    assert y_pred.shape[1] == 2
    same_left = torch.stack([y_true[:, 0], y_pred[:, 0]], dim=1)
    same_left, _ = torch.max(same_left, dim=1)
    same_right = torch.stack([y_true[:, 1], y_pred[:, 1]], dim=1)
    same_right, _ = torch.min(same_right, dim=1)
    same_len = same_right - same_left + 1  # (batch_size,)
    same_len = torch.stack([same_len, torch.zeros_like(same_len)], dim=1)
    same_len, _ = torch.max(same_len, dim=1)
    same_len = same_len.type(torch.float)
    pred_len = (y_pred[:, 1] - y_pred[:, 0] + 1).type(torch.float)
    true_len = (y_true[:, 1] - y_true[:, 0] + 1).type(torch.float)
    pre = same_len / (pred_len + eps)
    rec = same_len / (true_len + eps)
    f1 = 2 * pre * rec / (pre + rec + eps)
    return -torch.mean(f1)
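The arithmetic in the body above is the usual soft span-F1: the overlap is min(right ends) - max(left ends) + 1, clipped at zero. A plain-Python check with one hypothetical span pair:

# predicted span [2, 7], gold span [4, 9] (inclusive indices)
overlap = max(0, min(7, 9) - max(2, 4) + 1)          # 4 positions shared
precision = overlap / (7 - 2 + 1)                    # 4/6
recall = overlap / (9 - 4 + 1)                       # 4/6
f1 = 2 * precision * recall / (precision + recall)   # about 0.667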