This article collects typical usage examples of the Python method mxnet.nd.max. If you are unsure what nd.max does or how to call it, the curated code examples below should help. You can also explore further uses of its containing module, mxnet.nd.
The following 15 code examples of nd.max are shown, ordered by popularity by default.
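Before the project examples, here is a minimal standalone sketch of nd.max itself (not taken from any project; the array values are arbitrary):

from mxnet import nd

x = nd.array([[1, 5, 2],
              [4, 3, 6]])
print(nd.max(x))           # global maximum: [6.]
print(nd.max(x, axis=0))   # column-wise maximum: [4. 5. 6.]
print(nd.max(x, axis=1))   # row-wise maximum: [5. 6.]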
Example 1: get_max_pred
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def get_max_pred(batch_heatmaps):
    batch_size = batch_heatmaps.shape[0]
    num_joints = batch_heatmaps.shape[1]
    width = batch_heatmaps.shape[3]
    heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
    idx = nd.argmax(heatmaps_reshaped, 2)
    maxvals = nd.max(heatmaps_reshaped, 2)
    maxvals = maxvals.reshape((batch_size, num_joints, 1))
    idx = idx.reshape((batch_size, num_joints, 1))
    preds = nd.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = (preds[:, :, 0]) % width
    preds[:, :, 1] = nd.floor((preds[:, :, 1]) / width)
    pred_mask = nd.tile(nd.greater(maxvals, 0.0), (1, 1, 2))
    pred_mask = pred_mask.astype(np.float32)
    preds *= pred_mask
    return preds, maxvals
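A quick way to exercise get_max_pred is with a random heatmap batch (illustrative only; the shapes below are assumed, e.g. 17 joints on a 64x48 heatmap):

import numpy as np
from mxnet import nd

heatmaps = nd.random.uniform(shape=(2, 17, 64, 48))   # (batch, joints, H, W)
preds, maxvals = get_max_pred(heatmaps)
print(preds.shape, maxvals.shape)   # (2, 17, 2) and (2, 17, 1)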
Example 2: crop_resize_normalize
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def crop_resize_normalize(img, bbox_list, output_size,
                          mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
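A hedged usage sketch for crop_resize_normalize, assuming the function's own imports (mxnet.image as image, mxnet.gluon.data.vision.transforms as transforms, nd) are in scope; the image and boxes below are made up:

import numpy as np

img = np.random.randint(0, 255, size=(480, 640, 3)).astype(np.uint8)
bboxes = [[50, 60, 200, 300], [300, 100, 500, 400]]
batch = crop_resize_normalize(img, bboxes, output_size=(256, 192))
print(batch.shape)   # (2, 3, 256, 192)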
Example 3: transformBox
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
    center = np.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    _pt = np.zeros(2)
    _pt[0] = pt[0] - ul[0]
    _pt[1] = pt[1] - ul[1]
    # Move to center
    _pt[0] = _pt[0] + max(0, (lenW - 1) / 2 - center[0])
    _pt[1] = _pt[1] + max(0, (lenH - 1) / 2 - center[1])
    pt = (_pt * resH) / lenH
    pt[0] = round(float(pt[0]))
    pt[1] = round(float(pt[1]))
    return pt
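An illustrative call to transformBox, mapping a keypoint inside a person box into heatmap coordinates (all numbers below are invented; 256x192 input and 64x48 heatmap are typical pose-estimation sizes):

import numpy as np

pt = np.array([150., 120.])    # keypoint in original image coordinates
ul = np.array([100., 80.])     # upper-left corner of the box
br = np.array([220., 260.])    # bottom-right corner of the box
print(transformBox(pt, ul, br, inpH=256, inpW=192, resH=64, resW=48))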
Example 4: transformBoxInvert
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def transformBoxInvert(pt, ul, br, resH, resW):
    # type: (Tensor, Tensor, Tensor, float, float) -> Tensor
    center = mx.nd.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW)
    lenW = lenH * resW / resH
    _pt = (pt * lenH) / resH
    if bool(((lenW - 1) / 2 - center[0]) > 0):
        _pt[0] = _pt[0] - ((lenW - 1) / 2 - center[0]).asscalar()
    if bool(((lenH - 1) / 2 - center[1]) > 0):
        _pt[1] = _pt[1] - ((lenH - 1) / 2 - center[1]).asscalar()
    new_point = mx.nd.zeros(2)
    new_point[0] = _pt[0] + ul[0]
    new_point[1] = _pt[1] + ul[1]
    return new_point
Example 5: crop_resize_normalize
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def crop_resize_normalize(img, bbox_list, output_size):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
Example 6: bbox_overlaps
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def bbox_overlaps(anchors: mx.nd.NDArray, gt: mx.nd.NDArray):
    """
    Get IoU of the anchors and ground truth bounding boxes.
    The shapes of anchors and gt should be (N, 4) and (M, 4),
    so the shape of the return value is (N, M).
    """
    ret = []
    for i in range(gt.shape[0]):
        cgt = gt[i].reshape((1, 4)).broadcast_to(anchors.shape)
        # intersection
        x0 = nd.max(nd.stack(anchors[:, 0], cgt[:, 0]), axis=0)
        y0 = nd.max(nd.stack(anchors[:, 1], cgt[:, 1]), axis=0)
        x1 = nd.min(nd.stack(anchors[:, 2], cgt[:, 2]), axis=0)
        y1 = nd.min(nd.stack(anchors[:, 3], cgt[:, 3]), axis=0)
        inter = _get_area(nd.concatenate([x0.reshape((-1, 1)),
                                          y0.reshape((-1, 1)),
                                          x1.reshape((-1, 1)),
                                          y1.reshape((-1, 1))], axis=1))
        outer = _get_area(anchors) + _get_area(cgt) - inter
        iou = inter / outer
        ret.append(iou.reshape((-1, 1)))
    ret = nd.concatenate(ret, axis=1)
    return ret
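A small check of bbox_overlaps (it relies on _get_area from Example 12 and the example's own imports; the boxes are toy values):

import mxnet as mx
from mxnet import nd

anchors = nd.array([[0, 0, 2, 2],
                    [1, 1, 4, 4]])
gt = nd.array([[1, 1, 3, 3]])
print(bbox_overlaps(anchors, gt))   # shape (2, 1); IoUs are 1/7 and 4/9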
Example 7: upscale_bbox_fn
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def upscale_bbox_fn(bbox, img, scale=1.25):
    new_bbox = []
    x0 = bbox[0]
    y0 = bbox[1]
    x1 = bbox[2]
    y1 = bbox[3]
    w = (x1 - x0) / 2
    h = (y1 - y0) / 2
    center = [x0 + w, y0 + h]
    new_x0 = max(center[0] - w * scale, 0)
    new_y0 = max(center[1] - h * scale, 0)
    new_x1 = min(center[0] + w * scale, img.shape[1])
    new_y1 = min(center[1] + h * scale, img.shape[0])
    new_bbox = [new_x0, new_y0, new_x1, new_y1]
    return new_bbox
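upscale_bbox_fn can be tried on a dummy image (the box and image size below are arbitrary):

import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)    # H x W x C
bbox = [100, 100, 300, 400]                      # x0, y0, x1, y1
print(upscale_bbox_fn(bbox, img, scale=1.25))    # box enlarged around its center, clipped to the image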
Example 8: refine_bound
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def refine_bound(ul, br):
"""Adjust bound"""
ul[0] = min(ul[0], br[0] - 5)
ul[1] = min(ul[1], br[1] - 5)
br[0] = max(br[0], ul[0] + 5)
br[1] = max(br[1], ul[1] + 5)
return ul, br
Example 9: random_sample_bbox
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def random_sample_bbox(ul, br, w, h, im_width, im_height):
"""Take random sample"""
patch_scale = random.uniform(0, 1)
if patch_scale > 0.85:
ratio = float(h) / w
if w < h:
patch_w = patch_scale * w
patch_h = patch_w * ratio
else:
patch_h = patch_scale * h
patch_w = patch_h / ratio
xmin = ul[0] + random.uniform(0, 1) * (w - patch_w)
ymin = ul[1] + random.uniform(0, 1) * (h - patch_h)
xmax = xmin + patch_w + 1
ymax = ymin + patch_h + 1
else:
xmin = max(1, min(ul[0] + np.random.normal(-0.0142, 0.1158) * w, im_width - 3))
ymin = max(1, min(ul[1] + np.random.normal(0.0043, 0.068) * h, im_height - 3))
xmax = min(max(xmin + 2, br[0] + np.random.normal(0.0154, 0.1337) * w), im_width - 3)
ymax = min(max(ymin + 2, br[1] + np.random.normal(-0.0013, 0.0711) * h), im_height - 3)
ul[0] = xmin
ul[1] = ymin
br[0] = xmax
br[1] = ymax
return ul, br
Example 10: drawGaussian
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def drawGaussian(img, pt, sigma, sig=1):
    tmpSize = 3 * sigma
    # Check that any part of the gaussian is in-bounds
    ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]
    br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        # If not, just return the image as is
        return img
    # Generate gaussian
    size = 2 * tmpSize + 1
    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    sigma = size / 4.0
    # The gaussian is not normalized, we want the center value to equal 1
    g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * (sigma ** 2)))
    if sig < 0:
        g *= opt.spRate
    # Usable gaussian range
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    # Image range
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return img
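A minimal call to drawGaussian (heatmap size and keypoint are made up; with the default sig=1 the opt.spRate branch is never taken, so the project's external opt object is not needed):

import numpy as np

heatmap = np.zeros((64, 48), dtype=np.float32)
heatmap = drawGaussian(heatmap, pt=(24, 32), sigma=1)
print(heatmap.max())   # 1.0, since the unnormalized gaussian peaks at the keypoint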
Example 11: pseudo_labeling
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def pseudo_labeling(self, logits, confidence=0.):
    softmax = nd.softmax(logits, axis=1)
    prob = nd.max(softmax, axis=1)
    p_label = nd.argmax(softmax, axis=1)
    mask = prob > confidence
    return p_label, mask
# def update_beta(self):
# return self.args.beta
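Since pseudo_labeling is a method, the core nd.softmax / nd.max / nd.argmax pattern can also be reproduced standalone (the logits and confidence threshold below are invented):

from mxnet import nd

logits = nd.array([[2.0, 0.5, 0.1],
                   [0.2, 0.1, 3.0]])
softmax = nd.softmax(logits, axis=1)
prob = nd.max(softmax, axis=1)        # confidence of the top class
p_label = nd.argmax(softmax, axis=1)  # pseudo label
mask = prob > 0.8                     # keep only confident predictions
print(p_label, mask)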
Example 12: _get_area
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def _get_area(bbox: mx.nd.NDArray):
    zeros = mx.nd.zeros_like(bbox[:, 0])
    width = mx.nd.max(nd.stack(bbox[:, 2] - bbox[:, 0], zeros), axis=0)
    height = mx.nd.max(nd.stack(bbox[:, 3] - bbox[:, 1], zeros), axis=0)
    return width * height
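_get_area clamps negative widths and heights to zero, which the small check below illustrates (toy boxes; the example's mx and nd imports are assumed to be in scope):

import mxnet as mx
from mxnet import nd

boxes = nd.array([[0, 0, 3, 2],
                  [5, 5, 4, 9]])   # the second box is degenerate (x1 < x0)
print(_get_area(boxes))            # [6. 0.]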
Example 13: relative_error
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def relative_error(y_hat, y_exact):
    return nd.max(
        nd.max(nd.abs(y_exact - y_hat), axis=1)
        / nd.max(nd.abs(y_exact), axis=1)
    )
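relative_error can be sanity-checked with a prediction that is off by a constant factor (the values below are arbitrary):

from mxnet import nd

y_exact = nd.array([[1., 2., 4.],
                    [3., 6., 9.]])
y_hat = y_exact * 1.01                  # 1% relative error everywhere
print(relative_error(y_hat, y_exact))   # approximately [0.01]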
Example 14: get_aggregate_fn
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def get_aggregate_fn(agg):
"""Internal function to get the aggregation function for node data
generated from different relations.
Parameters
----------
agg : str
Method for aggregating node features generated by different relations.
Allowed values are 'sum', 'max', 'min', 'mean', 'stack'.
Returns
-------
callable
Aggregator function that takes a list of tensors to aggregate
and returns one aggregated tensor.
"""
if agg == 'sum':
fn = nd.sum
elif agg == 'max':
fn = nd.max
elif agg == 'min':
fn = nd.min
elif agg == 'mean':
fn = nd.mean
elif agg == 'stack':
fn = None # will not be called
else:
raise DGLError('Invalid cross type aggregator. Must be one of '
'"sum", "max", "min", "mean" or "stack". But got "%s"' % agg)
if agg == 'stack':
def stack_agg(inputs, dsttype): # pylint: disable=unused-argument
if len(inputs) == 0:
return None
return nd.stack(*inputs, axis=1)
return stack_agg
else:
def aggfn(inputs, dsttype): # pylint: disable=unused-argument
if len(inputs) == 0:
return None
stacked = nd.stack(*inputs, axis=0)
return fn(stacked, axis=0)
return aggfn
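get_aggregate_fn is an internal DGL helper; a quick check of the 'max' aggregator might look like this (the feature tensors are invented, and dsttype is ignored by the returned function):

from mxnet import nd

agg_fn = get_aggregate_fn('max')
feats = [nd.array([[1., 4.], [2., 0.]]),
         nd.array([[3., 1.], [0., 5.]])]
print(agg_fn(feats, dsttype=None))   # element-wise max across the two relation outputs: [[3. 4.] [2. 5.]]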
Example 15: alpha_pose_image_cropper
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import max [as alias]
def alpha_pose_image_cropper(source_img, boxes, scores, output_shape=(256, 192)):
    if boxes is None:
        return None, boxes
    # crop person poses
    img_width, img_height = source_img.shape[1], source_img.shape[0]
    tensors = mx.nd.zeros([boxes.shape[0], 3, output_shape[0], output_shape[1]])
    out_boxes = np.zeros([boxes.shape[0], 4])
    for i, box in enumerate(boxes.asnumpy()):
        img = source_img.copy()
        box_width = box[2] - box[0]
        box_height = box[3] - box[1]
        if box_width > 100:
            scale_rate = 0.2
        else:
            scale_rate = 0.3
        # crop image
        left = int(max(0, box[0] - box_width * scale_rate / 2))
        up = int(max(0, box[1] - box_height * scale_rate / 2))
        right = int(min(img_width - 1,
                        max(left + 5, box[2] + box_width * scale_rate / 2)))
        bottom = int(min(img_height - 1,
                         max(up + 5, box[3] + box_height * scale_rate / 2)))
        crop_width = right - left
        if crop_width < 1:
            continue
        crop_height = bottom - up
        if crop_height < 1:
            continue
        ul = np.array((left, up))
        br = np.array((right, bottom))
        img = cv_cropBox(img, ul, br, output_shape[0], output_shape[1])
        img = mx.nd.image.to_tensor(mx.nd.array(img))
        # img = img.transpose((2, 0, 1))
        img[0] = img[0] - 0.406
        img[1] = img[1] - 0.457
        img[2] = img[2] - 0.480
        assert img.shape[0] == 3
        tensors[i] = img
        out_boxes[i] = (left, up, right, bottom)
    return tensors, out_boxes