This article collects and summarizes typical usage examples of the Python method mxnet.autograd.pause. If you have been wondering how to use autograd.pause, what exactly it does, or what it looks like in practice, the curated examples below may help. You can also read more about the enclosing module, mxnet.autograd.
The following shows 15 code examples of autograd.pause, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
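Before the examples, it helps to see what autograd.pause() actually does: operations executed inside the scope are not recorded, so their results enter any surrounding autograd.record() graph as constants. A minimal, self-contained sketch (not taken from the examples below; shapes and values are illustrative):

import mxnet as mx
from mxnet import autograd

x = mx.nd.ones((2, 3))
x.attach_grad()
with autograd.record():
    y = x * 2                  # recorded: contributes to x.grad
    with autograd.pause():
        c = y.sum()            # not recorded: enters the graph as a constant
    z = y + c
z.backward()
print(x.grad)                  # all 2s; the pause()d branch contributes nothing

This is the common thread in the examples below: target generation, running-statistics updates, and sampling are wrapped in pause() because they produce labels or state, not differentiable outputs.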
Example 1: hybrid_forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
    """Hybrid forward"""
    if not autograd.is_training():
        return F.BatchNorm(x, gamma, beta, running_mean, running_var, name='fwd',
                           **self._kwargs)
    isum, isqu = F.SumSquare(x)
    # isum = x.sum(axis=1, exclude=True)
    # isqu = (x**2).sum(axis=1, exclude=True)
    N = self.ndevices * x.shape[0] * x.shape[2] * x.shape[3]
    allreduce = AllReduce(self._prefix)
    osum, osqu = allreduce(isum, isqu)
    # compute mean and std from the cross-device sums
    mean = osum / N
    sumvar = osqu - osum * osum / N
    bias_var = sumvar / N
    std = F.sqrt(F.maximum(bias_var, self.eps))
    # update running mean and var without recording gradients
    with autograd.pause():
        unbias_var = sumvar / (N - 1)
        self.updater(self.running_mean, self.running_var, mean, unbias_var,
                     self.momentum, x.context)
    # normalize with the freshly synchronized statistics
    output = F.DecoupleBatchNorm(x, gamma, beta, mean, std)
    return output
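The pause() block above is the canonical pattern for updating running statistics: the update mutates buffer state and must stay out of the recorded graph. A stripped-down sketch of the same pattern, with illustrative names (running_mean, momentum, and update_running_mean are not part of the example above):

import mxnet as mx
from mxnet import autograd

running_mean = mx.nd.zeros((3,))
momentum = 0.9

def update_running_mean(batch_mean):
    # in-place exponential moving average, invisible to autograd
    with autograd.pause():
        running_mean[:] = momentum * running_mean + (1 - momentum) * batch_mean

update_running_mean(mx.nd.array([1.0, 2.0, 3.0]))
print(running_mean)  # [0.1 0.2 0.3]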
Example 2: hybrid_forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def hybrid_forward(self, F, box_preds, gt_boxes):
    """Generate dynamic training targets from predictions and ground truths.

    Parameters
    ----------
    F : mxnet.nd or mxnet.sym
        `F` is mxnet.sym if hybridized or mxnet.nd if not.
    box_preds : mxnet.nd.NDArray
        Predicted bounding boxes.
    gt_boxes : mxnet.nd.NDArray
        Ground-truth bounding boxes.

    Returns
    -------
    (tuple of) mxnet.nd.NDArray
        objectness: 0 for negative, 1 for positive, -1 for ignore.
        center_targets: regression target for center x and y.
        scale_targets: regression target for scale x and y.
        weights: element-wise gradient weights for center_targets and scale_targets.
        class_targets: a one-hot vector for classification.
    """
    with autograd.pause():
        box_preds = box_preds.reshape((0, -1, 4))
        objness_t = F.zeros_like(box_preds.slice_axis(axis=-1, begin=0, end=1))
        center_t = F.zeros_like(box_preds.slice_axis(axis=-1, begin=0, end=2))
        scale_t = F.zeros_like(box_preds.slice_axis(axis=-1, begin=0, end=2))
        weight_t = F.zeros_like(box_preds.slice_axis(axis=-1, begin=0, end=2))
        class_t = F.ones_like(objness_t.tile(reps=(self._num_class))) * -1
        batch_ious = self._batch_iou(box_preds, gt_boxes)  # (B, N, M)
        ious_max = batch_ious.max(axis=-1, keepdims=True)  # (B, N, 1)
        objness_t = (ious_max > self._ignore_iou_thresh) * -1  # use -1 for ignored
    return objness_t, center_t, scale_t, weight_t, class_t
Example 3: hybrid_forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def hybrid_forward(self, F, anchor, score, bbox_pred, img):
    """Generate proposals."""
    with autograd.pause():
        # restore bounding boxes from regression deltas
        roi = self._box_decoder(bbox_pred, anchor)
        # clip rois to the image boundary
        roi = self._clipper(roi, img)
        # remove bounding boxes that don't meet the min_size constraint
        # by setting them to (-1, -1, -1, -1)
        xmin, ymin, xmax, ymax = roi.split(axis=-1, num_outputs=4)
        width = xmax - xmin + 1.0
        height = ymax - ymin + 1.0
        # TODO(zhreshold): there's an im_ratio to handle here, but it requires
        # additional info, and we don't expect a big difference
        invalid = (width < self._min_size) + (height < self._min_size)
        # give invalid proposals a confidence of -1 so they cannot suppress
        # valid anchors later
        score = F.where(invalid, F.ones_like(invalid) * -1, score)
        invalid = F.broadcast_axes(invalid, axis=2, size=4)
        roi = F.where(invalid, F.ones_like(invalid) * -1, roi)
        pre = F.concat(score, roi, dim=-1)
    return pre
Example 4: forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
# Also assumed: import mxnet as mx; import numpy as np
def forward(self, bbox, anchor, width, height):
    """RPNTargetGenerator is only used in the data transform with no batch dimension.
    Be careful: there are numpy operations inside.

    Parameters
    ----------
    bbox: (M, 4) ground-truth boxes with corner encoding.
    anchor: (N, 4) anchor boxes with corner encoding.
    width: int, width of the input image.
    height: int, height of the input image.

    Returns
    -------
    cls_target: (N,) value +1: pos, 0: neg, -1: ignore
    box_target: (N, 4) only anchors whose cls_target > 0 have a nonzero box target
    box_mask: (N, 4) only anchors whose cls_target > 0 have a nonzero mask
    """
    with autograd.pause():
        # calculate ious between (N, 4) anchors and (M, 4) bbox ground truths;
        # ious is (N, M)
        ious = mx.nd.contrib.box_iou(anchor, bbox, format='corner').asnumpy()
        # mask out anchors that fall outside the image, (N, 4)
        a_xmin, a_ymin, a_xmax, a_ymax = mx.nd.split(anchor, 4, axis=-1)
        invalid_mask = (a_xmin < 0) + (a_ymin < 0) + (a_xmax >= width) + (a_ymax >= height)
        ious = np.where(invalid_mask.asnumpy(), -1.0, ious)
        samples, matches = self._sampler(ious)
        # training targets for RPN
        cls_target, _ = self._cls_encoder(samples)
        box_target, box_mask = self._box_encoder(
            np.expand_dims(samples, axis=0), np.expand_dims(matches, axis=0),
            np.expand_dims(anchor.asnumpy(), axis=0), np.expand_dims(bbox.asnumpy(), axis=0))
    return cls_target, box_target[0], box_mask[0]
Example 5: hybrid_forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def hybrid_forward(self, F, roi, samples, matches, gt_label, gt_box):
    """Components can handle batched images.

    Parameters
    ----------
    roi: (B, N, 4), input proposals
    samples: (B, N), value +1: positive / -1: negative.
    matches: (B, N), value [0, M), index to gt_label and gt_box.
    gt_label: (B, M), value [0, num_class), excluding background class.
    gt_box: (B, M, 4), input ground-truth box corner coordinates.

    Returns
    -------
    cls_target: (B, N), value [0, num_class + 1), including background.
    box_target: (B, N, C, 4), only the foreground class has a nonzero target.
    box_weight: (B, N, C, 4), only the foreground class has a nonzero weight.
    """
    with autograd.pause():
        # cls_target (B, N)
        cls_target = self._cls_encoder(samples, matches, gt_label)
        # box_target, box_weight (C, B, N, 4)
        box_target, box_mask, indices = self._box_encoder(samples, matches, roi, gt_label,
                                                          gt_box)
    return cls_target, box_target, box_mask, indices
Example 6: hybrid_forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def hybrid_forward(self, F, anchor, score, bbox_pred, img):
    """Generate proposals. Limited to batch size 1 in the current implementation."""
    with autograd.pause():
        # restore bounding boxes from regression deltas
        roi = self._box_decoder(bbox_pred, self._box_to_center(anchor))
        # clip rois to the image boundary
        roi = self._clipper(roi, img)
        # remove bounding boxes that don't meet the min_size constraint
        # by setting them to (-1, -1, -1, -1)
        xmin, ymin, xmax, ymax = roi.split(axis=-1, num_outputs=4)
        width = xmax - xmin
        height = ymax - ymin
        # TODO(zhreshold): there's an im_ratio to handle here, but it requires
        # additional info, and we don't expect a big difference
        invalid = (width < self._min_size) + (height < self._min_size)
        # give invalid proposals a confidence of -1 so they cannot suppress
        # valid anchors later
        score = F.where(invalid, F.ones_like(invalid) * -1, score)
        invalid = F.repeat(invalid, axis=-1, repeats=4)
        roi = F.where(invalid, F.ones_like(invalid) * -1, roi)
        pre = F.concat(score, roi, dim=-1)
    return pre
Example 7: forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
# Also assumed: import mxnet as mx; import numpy as np
def forward(self, bbox, anchor, width, height):
    """RPNTargetGenerator is only used in the data transform with no batch dimension.
    Be careful: there are numpy operations inside.

    Parameters
    ----------
    bbox: (M, 4) ground-truth boxes with corner encoding.
    anchor: (N, 4) anchor boxes with corner encoding.
    width: int, width of the input image.
    height: int, height of the input image.

    Returns
    -------
    cls_target: (N,) value +1: pos, 0: neg, -1: ignore
    box_target: (N, 4) only anchors whose cls_target > 0 have a nonzero box target
    box_mask: (N, 4) only anchors whose cls_target > 0 have a nonzero mask
    """
    with autograd.pause():
        # calculate ious between (N, 4) anchors and (M, 4) bbox ground truths;
        # ious is (N, M)
        ious = mx.nd.contrib.box_iou(anchor, bbox, format='corner').asnumpy()
        # mask out anchors that fall outside the image, (N, 4)
        a_xmin, a_ymin, a_xmax, a_ymax = mx.nd.split(anchor, 4, axis=-1)
        invalid_mask = (a_xmin < 0) + (a_ymin < 0) + (a_xmax >= width) + (a_ymax >= height)
        ious = np.where(invalid_mask.asnumpy(), -1.0, ious)
        samples, matches = self._sampler(ious)
        # training targets for RPN
        cls_target, _ = self._cls_encoder(samples)
        box_target, box_mask = self._box_encoder(
            np.expand_dims(samples, axis=0), np.expand_dims(matches, axis=0),
            np.expand_dims(anchor.asnumpy(), axis=0), np.expand_dims(bbox.asnumpy(), axis=0))
    return mx.nd.array(cls_target, ctx=bbox.context), \
        mx.nd.array(box_target[0], ctx=bbox.context), \
        mx.nd.array(box_mask[0], ctx=bbox.context)
Example 8: forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def forward(self, roi, samples, matches, gt_label, gt_box):
    """Components can handle batched images.

    Parameters
    ----------
    roi: (B, N, 4), input proposals
    samples: (B, N), value +1: positive / -1: negative.
    matches: (B, N), value [0, M), index to gt_label and gt_box.
    gt_label: (B, M), value [0, num_class), excluding background class.
    gt_box: (B, M, 4), input ground-truth box corner coordinates.

    Returns
    -------
    cls_target: (B, N), value [0, num_class + 1), including background.
    box_target: (B, N, C, 4), only the foreground class has a nonzero target.
    box_weight: (B, N, C, 4), only the foreground class has a nonzero weight.
    """
    with autograd.pause():
        # cls_target (B, N)
        cls_target = self._cls_encoder(samples, matches, gt_label)
        # box_target, box_weight (C, B, N, 4)
        box_target, box_mask = self._box_encoder(
            samples, matches, roi, gt_label, gt_box)
        # reshape to match predictions: (C, B, N, 4) -> (B, N, C, 4)
        box_target = box_target.transpose((1, 2, 0, 3))
        box_mask = box_mask.transpose((1, 2, 0, 3))
    return cls_target, box_target, box_mask
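A hypothetical call illustrating the shapes in the docstring (B=2 images, N=5 proposals, M=3 ground-truth boxes; target_gen stands in for an instance of this block and is not defined in the example above):

import mxnet as mx

B, N, M = 2, 5, 3
roi = mx.nd.random.uniform(shape=(B, N, 4))
samples = mx.nd.ones((B, N))              # mark every proposal positive
matches = mx.nd.zeros((B, N))             # match everything to gt box 0
gt_label = mx.nd.zeros((B, M))
gt_box = mx.nd.random.uniform(shape=(B, M, 4))
# cls_target, box_target, box_mask = target_gen(roi, samples, matches, gt_label, gt_box)
# expected shapes: cls_target (B, N); box_target and box_mask (B, N, C, 4)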
Example 9: hybrid_forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def hybrid_forward(self, F, x, *args, **kw):
    """Run all the pre-processing and post-processing for the execution of an
    InferenceAlgorithm.

    :param F: the MXNet computation mode
    :type F: mxnet.symbol or mxnet.ndarray
    :param x: a dummy variable to enable the execution of this Gluon block
    :type x: MXNet NDArray or MXNet Symbol
    :param args: all the positional arguments, which correspond to the data
        provided to the InferenceAlgorithm.
    :type args: list of MXNet NDArray or MXNet Symbol
    :param kw: all the keyword arguments, which correspond to the parameters
        that may require gradients.
    :type kw: {str(UUID): MXNet NDArray or MXNet Symbol}
    :returns: the outcome of the InferenceAlgorithm.
    :rtype: {str: MXNet NDArray or MXNet Symbol}
    """
    for to_uuid, from_uuid in self._var_ties.items():
        kw[to_uuid] = kw[from_uuid]
    data = {k: v for k, v in zip(self._data_def, args)}
    variables = add_sample_dimension_to_arrays(F, data)
    for k, v in self._var_trans.items():
        kw[k] = v.transform(kw[k], F=F)
    add_sample_dimension_to_arrays(F, kw, out=variables)
    add_sample_dimension_to_arrays(F, self._constants, out=variables)
    obj = self._infr_method.compute(F=F, variables=variables)
    with autograd.pause():
        # An inference algorithm may directly set the value of a parameter
        # instead of computing its gradient. Handle those assignments here,
        # outside the recorded graph.
        for k, v in variables.items():
            if k.startswith(SET_PARAMETER_PREFIX):
                self._infr_params[v[0]] = v[1]
    return obj
Example 10: compute
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
# Also assumed: import numpy as np
def compute(self, F, variables):
    has_mean = self.model.F.factor.has_mean
    X = variables[self.model.X]
    Y = variables[self.model.Y]
    noise_var = variables[self.model.noise_var]
    D = Y.shape[-1]
    N = X.shape[-2]
    kern = self.model.kernel
    kern_params = kern.fetch_parameters(variables)
    X, Y, noise_var, kern_params = arrays_as_samples(
        F, [X, Y, noise_var, kern_params])
    K = kern.K(F, X, **kern_params) + \
        F.expand_dims(F.eye(N, dtype=X.dtype), axis=0) * \
        F.expand_dims(noise_var, axis=-2)
    if self.jitter > 0.:
        K = K + F.expand_dims(F.eye(N, dtype=X.dtype), axis=0) * self.jitter
    L = F.linalg.potrf(K)
    if has_mean:
        mean = variables[self.model.mean]
        Y = Y - mean
    LinvY = F.linalg.trsm(L, Y)
    logdet_l = F.linalg.sumlogdiag(F.abs(L))
    tmp = F.sum(F.reshape(F.square(LinvY) + np.log(2. * np.pi),
                          shape=(Y.shape[0], -1)), axis=-1)
    logL = -logdet_l * D - tmp / 2
    with autograd.pause():
        # cache the posterior quantities; these assignments must not be
        # recorded by autograd
        self.set_parameter(variables, self.posterior.X, X[0])
        self.set_parameter(variables, self.posterior.L, L[0])
        self.set_parameter(variables, self.posterior.LinvY, LinvY[0])
    return logL
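For reference, the quantity assembled above is the standard Gaussian log marginal likelihood of GP regression, computed through the Cholesky factor of the noisy kernel matrix (a restatement of the code, with N data points and D output dimensions):

K + \sigma^2 I = L L^\top, \qquad
\log p(Y \mid X) = -D \sum_{i=1}^{N} \log L_{ii} - \frac{1}{2} \sum_{n,d} \Big[ (L^{-1} Y)_{nd}^{2} + \log 2\pi \Big]

The optional jitter is a small diagonal term added purely for numerical stability of the factorization; the pause() block then caches X, L, and L^{-1}Y on the posterior without recording those assignments.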
Example 11: add_batchid
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def add_batchid(self, F, bbox):
    num_roi = self._num_sample if autograd.is_training() else self._rpn_test_post_nms
    with autograd.pause():
        roi_batchid = F.arange(0, self._max_batch, repeat=num_roi)
        # flatten the batch dim because ROIPooling requires 2d input
        roi = F.concat(*[roi_batchid.reshape((-1, 1)), bbox.reshape((-1, 4))], dim=-1)
        roi = F.stop_gradient(roi)
    return roi
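add_batchid combines the two gradient-blocking tools MXNet offers: autograd.pause() keeps the operations from being recorded at all, while F.stop_gradient cuts the gradient through a value that is already part of the recorded graph. A minimal sketch of the stop_gradient half (imperative mode, illustrative values):

import mxnet as mx
from mxnet import autograd

x = mx.nd.ones((2,))
x.attach_grad()
with autograd.record():
    y = x * 3
    y_const = mx.nd.stop_gradient(y)  # recorded, but gradients stop here
    z = y + y_const
z.backward()
print(x.grad)                         # all 3s, not 6s: y_const contributes nothing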
Example 12: decode_bbox
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def decode_bbox(self, source_bbox, encoded_bbox, stds):
    with autograd.pause():
        box_decoder = NormalizedBoxCenterDecoder(stds=stds)
        roi = box_decoder(encoded_bbox, self.box_to_center(source_bbox))
    return roi
Example 13: forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def forward(self, roi):
    with autograd.pause():
        # keep proposals up to the first -1-padded entry; keep all of them
        # if none are padded
        num_valid = self._rpn_train_pre_nms
        for i in range(self._rpn_train_pre_nms):
            if roi[0, i, 0] == -1:
                num_valid = i
                break
        roi = roi.slice_axis(axis=1, begin=0, end=num_valid)
    return roi
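A quick check of the trimming logic above on a padded dummy input (a (1, 4, 5) array whose last two rows are the -1 padding the loop searches for):

import mxnet as mx

roi = mx.nd.array([[[0.9, 0, 0, 2, 2],
                    [0.8, 0, 0, 1, 1],
                    [-1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1]]])
# the loop above finds num_valid = 2, so the slice keeps only the real rows
print(roi.slice_axis(axis=1, begin=0, end=2))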
Example 14: forward
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
def forward(self, roi, samples, matches, gt_label, gt_box):
    """Components can handle batched images.

    Parameters
    ----------
    roi: (B, N, 4), input proposals
    samples: (B, N), value +1: positive / -1: negative.
    matches: (B, N), value [0, M), index to gt_label and gt_box.
    gt_label: (B, M), value [0, num_class), excluding background class.
    gt_box: (B, M, 4), input ground-truth box corner coordinates.

    Returns
    -------
    cls_target: (B, N), value [0, num_class + 1), including background.
    box_target: (B, N, C, 4), only the foreground class has a nonzero target.
    box_weight: (B, N, C, 4), only the foreground class has a nonzero weight.
    """
    with autograd.pause():
        # cls_target (B, N)
        cls_target = self._cls_encoder(samples, matches, gt_label)
        # this encoder is class-agnostic: box_target and box_mask come back
        # as (B, N, 4), so insert a singleton class axis to match the
        # (B, N, C, 4) prediction layout
        box_target, box_mask = self._box_encoder(
            samples, matches, roi, gt_box)
        box_target = box_target.expand_dims(axis=2)
        box_mask = box_mask.expand_dims(axis=2)
    return cls_target, box_target, box_mask
Example 15: sample
# Required import: from mxnet import autograd [as alias]
# Or: from mxnet.autograd import pause [as alias]
# Also assumed: import numpy as np; from typing import Optional; Tensor is the library's tensor type alias
def sample(
    self, num_samples: Optional[int] = None, dtype=np.float32
) -> Tensor:
    with autograd.pause():
        s = self.base_distribution.sample(
            num_samples=num_samples, dtype=dtype
        )
        for t in self.transforms:
            s = t.f(s)
        return s
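Wrapping the draw in pause() makes the sample a constant with respect to any surrounding record() scope, which is the intended semantics for a non-reparameterized sample. A minimal sketch of the effect (the normal draw and shapes are illustrative, not part of the example above):

import mxnet as mx
from mxnet import autograd

loc = mx.nd.zeros((4,))
loc.attach_grad()
with autograd.record():
    with autograd.pause():
        eps = mx.nd.random.normal(shape=(4,))  # the draw itself is not recorded
    sample = loc + eps                         # gradient flows only through loc
sample.backward()
print(loc.grad)                                # all ones: eps acted as a constant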