This article collects typical usage examples of the nd.stack method from the Python package mxnet. If you are unsure what nd.stack does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore other methods in the mxnet.nd module, which nd.stack belongs to.
The following shows 15 code examples of nd.stack, sorted by popularity by default.
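Before the examples, a minimal sketch of the method itself (not taken from any of the projects below): nd.stack joins arrays of identical shape along a newly inserted axis, whereas nd.concat joins them along an existing axis.

from mxnet import nd

a = nd.array([[1, 2], [3, 4]])
b = nd.array([[5, 6], [7, 8]])

c = nd.stack(a, b)           # new leading axis: shape (2, 2, 2)
d = nd.stack(a, b, axis=1)   # new axis at position 1: also (2, 2, 2), but the rows are paired instead
print(c.shape, d.shape)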
Example 1: crop_resize_normalize
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def crop_resize_normalize(img, bbox_list, output_size,
                          mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    for bbox in bbox_list:
        # Clip each box to the image boundaries.
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        # Crop the box region, resize it to the requested output size, then normalize.
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    # Stack all per-box patches into a single (N, C, H, W) batch.
    output_array = nd.stack(*output_list)
    return output_array
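A hypothetical call to the helper above might look like the following; the frame and boxes are invented, and the snippet assumes `transforms` (from mxnet.gluon.data.vision) plus `image` and `nd` (from mxnet) are imported where crop_resize_normalize is defined:

import numpy as np

frame = np.random.randint(0, 255, size=(480, 640, 3)).astype('uint8')  # fake HWC image
person_boxes = [[100, 80, 300, 400], [50, 50, 200, 200]]                # (x0, y0, x1, y1) per box

batch = crop_resize_normalize(frame, person_boxes, output_size=(256, 192))
print(batch.shape)  # (2, 3, 256, 192): one normalized CHW patch per box, joined by nd.stack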
Example 2: default_mp_pad_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        # Stack NDArrays directly into a pre-allocated shared-memory buffer.
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        # Recurse over each field of the sample tuples.
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        # Variable-length labels: pad with -1 up to the longest sample in the batch.
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0))
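A function like this is typically passed to a gluon.data.DataLoader through its batchify_fn argument; calling it directly also works, as in this small sketch (assuming the module defining it also has `import numpy as np` and `from mxnet import context` in scope):

from mxnet import nd

samples = [nd.random.uniform(shape=(3, 32, 32)) for _ in range(4)]
batch = default_mp_pad_batchify_fn(samples)
print(batch.shape)    # (4, 3, 32, 32), built with nd.stack into the pre-allocated buffer
print(batch.context)  # cpu_shared(0), so worker processes can hand the batch to the main process cheaply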
Example 3: crop_resize_normalize
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def crop_resize_normalize(img, bbox_list, output_size):
    output_list = []
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    for bbox in bbox_list:
        x0 = max(int(bbox[0]), 0)
        y0 = max(int(bbox[1]), 0)
        x1 = min(int(bbox[2]), int(img.shape[1]))
        y1 = min(int(bbox[3]), int(img.shape[0]))
        w = x1 - x0
        h = y1 - y0
        res_img = image.fixed_crop(nd.array(img), x0, y0, w, h, (output_size[1], output_size[0]))
        res_img = transform_test(res_img)
        output_list.append(res_img)
    output_array = nd.stack(*output_list)
    return output_array
Example 4: bbox_overlaps
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def bbox_overlaps(anchors: mx.nd.NDArray, gt: mx.nd.NDArray):
    """
    Get the IoU of the anchors and the ground-truth bounding boxes.
    The shapes of anchors and gt should be (N, 4) and (M, 4),
    so the shape of the return value is (N, M).
    """
    ret = []
    for i in range(gt.shape[0]):
        cgt = gt[i].reshape((1, 4)).broadcast_to(anchors.shape)
        # intersection rectangle
        x0 = nd.max(nd.stack(anchors[:, 0], cgt[:, 0]), axis=0)
        y0 = nd.max(nd.stack(anchors[:, 1], cgt[:, 1]), axis=0)
        x1 = nd.min(nd.stack(anchors[:, 2], cgt[:, 2]), axis=0)
        y1 = nd.min(nd.stack(anchors[:, 3], cgt[:, 3]), axis=0)
        inter = _get_area(nd.concatenate([x0.reshape((-1, 1)),
                                          y0.reshape((-1, 1)),
                                          x1.reshape((-1, 1)),
                                          y1.reshape((-1, 1))], axis=1))
        outer = _get_area(anchors) + _get_area(cgt) - inter
        iou = inter / outer
        ret.append(iou.reshape((-1, 1)))
    ret = nd.concatenate(ret, axis=1)
    return ret
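A small sanity check with hand-picked boxes (this assumes bbox_overlaps and the _get_area helper shown in Example 9 below are both in scope, along with `import mxnet as mx`):

from mxnet import nd

anchors = nd.array([[0, 0, 10, 10],
                    [0, 0, 5, 5]])
gt = nd.array([[0, 0, 10, 10]])

iou = bbox_overlaps(anchors, gt)
print(iou)  # roughly [[1.0], [0.25]], with shape (N, M) = (2, 1)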
Example 5: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def hybrid_forward(
    self, F, past_target: Tensor, past_valid_length: Tensor
) -> Tuple[Tensor, Tensor]:
    """
    Return two tensors, of shape
    (batch_size, num_samples, max_prediction_length, target_dim)
    and (batch_size, num_samples) respectively.
    """
    batch_size = past_target.shape[0]
    assert past_valid_length.shape[0] == batch_size
    target_shape = (batch_size, self.num_parallel_samples, 25)
    pred_target = nd.stack(
        nd.random.uniform(shape=target_shape),
        nd.random.randint(0, 10, shape=target_shape).astype(np.float32),
        axis=-1,
    )
    pred_valid_length = nd.random.randint(
        15, 25 + 1, shape=target_shape[:2]
    )
    return pred_target, pred_valid_length
Example 6: extract_edge_bbox
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def extract_edge_bbox(g):
    '''bbox encoding: each edge is encoded as the enclosing (union) box of its source and destination node boxes.'''
    src, dst = g.edges(order='eid')
    n = g.number_of_edges()
    src_bbox = g.ndata['pred_bbox'][src.asnumpy()]
    dst_bbox = g.ndata['pred_bbox'][dst.asnumpy()]
    edge_bbox = nd.zeros((n, 4), ctx=g.ndata['pred_bbox'].context)
    edge_bbox[:, 0] = nd.stack(src_bbox[:, 0], dst_bbox[:, 0]).min(axis=0)
    edge_bbox[:, 1] = nd.stack(src_bbox[:, 1], dst_bbox[:, 1]).min(axis=0)
    edge_bbox[:, 2] = nd.stack(src_bbox[:, 2], dst_bbox[:, 2]).max(axis=0)
    edge_bbox[:, 3] = nd.stack(src_bbox[:, 3], dst_bbox[:, 3]).max(axis=0)
    return edge_bbox
Example 7: default_pad_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def default_pad_batchify_fn(data):
    """Collate data into batch, labels are padded to same shape"""
    if isinstance(data[0], nd.NDArray):
        return nd.stack(*data)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((len(data), pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype)
Example 8: tsn_mp_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def tsn_mp_batchify_fn(data):
    """Collate data into batch. Use shared memory for stacking.
    Modify default batchify function for temporal segment networks.
    Change `nd.stack` to `nd.concat` since batch dimension already exists.
    """
    if isinstance(data[0], nd.NDArray):
        return nd.concat(*data, dim=0)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [tsn_mp_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        return nd.array(data, dtype=data.dtype,
                        ctx=context.Context('cpu_shared', 0))
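An illustrative call (shapes invented): each sample already carries a segment dimension, so the collated batch is concatenated rather than stacked:

from mxnet import nd

clips = [nd.random.uniform(shape=(3, 3, 224, 224)) for _ in range(4)]  # 4 samples, 3 segments each
batch = tsn_mp_batchify_fn(clips)
print(batch.shape)  # (12, 3, 224, 224): segments from all samples share one batch axis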
Example 9: _get_area
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def _get_area(bbox: mx.nd.NDArray):
    # Clamp negative widths/heights to zero so degenerate boxes get zero area.
    zeros = mx.nd.zeros_like(bbox[:, 0])
    width = mx.nd.max(nd.stack(bbox[:, 2] - bbox[:, 0], zeros), axis=0)
    height = mx.nd.max(nd.stack(bbox[:, 3] - bbox[:, 1], zeros), axis=0)
    return width * height
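For instance, a degenerate box (x1 < x0) gets its width clamped to zero and therefore an area of zero (assuming `import mxnet as mx` and `from mxnet import nd` are in scope):

boxes = mx.nd.array([[0, 0, 4, 3],    # 4 x 3 box -> area 12
                     [5, 5, 2, 9]])   # x1 < x0  -> width clamped to 0 -> area 0
print(_get_area(boxes))  # [12.  0.]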
Example 10: select_class_generator
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def select_class_generator(class_id):
    def select_class(img, label):
        ret_label = []
        for item in label:
            if item[4] == class_id:
                ret_label.append(item)
        return img, np.stack(ret_label)
    return select_class
Example 11: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def forward(self, feature, data, begin_state):
    num_nodes, batch_size, length, _ = data.shape
    # Split the sequence along the time axis and unroll the cell one step at a time.
    data = nd.split(data, axis=2, num_outputs=length, squeeze_axis=1)
    outputs, state = [], begin_state
    for input in data:
        output, state = self.forward_single(feature, input, state)
        outputs.append(output)
    # Re-stack the per-step outputs along the time axis.
    outputs = nd.stack(*outputs, axis=2)
    return outputs, state
Example 12: get_aggregate_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def get_aggregate_fn(agg):
    """Internal function to get the aggregation function for node data
    generated from different relations.

    Parameters
    ----------
    agg : str
        Method for aggregating node features generated by different relations.
        Allowed values are 'sum', 'max', 'min', 'mean', 'stack'.

    Returns
    -------
    callable
        Aggregator function that takes a list of tensors to aggregate
        and returns one aggregated tensor.
    """
    if agg == 'sum':
        fn = nd.sum
    elif agg == 'max':
        fn = nd.max
    elif agg == 'min':
        fn = nd.min
    elif agg == 'mean':
        fn = nd.mean
    elif agg == 'stack':
        fn = None  # will not be called
    else:
        raise DGLError('Invalid cross type aggregator. Must be one of '
                       '"sum", "max", "min", "mean" or "stack". But got "%s"' % agg)
    if agg == 'stack':
        def stack_agg(inputs, dsttype):  # pylint: disable=unused-argument
            if len(inputs) == 0:
                return None
            return nd.stack(*inputs, axis=1)
        return stack_agg
    else:
        def aggfn(inputs, dsttype):  # pylint: disable=unused-argument
            if len(inputs) == 0:
                return None
            stacked = nd.stack(*inputs, axis=0)
            return fn(stacked, axis=0)
        return aggfn
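A quick sketch of how the returned aggregators behave on two relation-specific feature tensors (the relation names are illustrative, and dsttype is ignored by both aggregators):

from mxnet import nd

h_follows = nd.ones((4, 8))     # node features produced by relation "follows"
h_likes = nd.ones((4, 8)) * 3   # node features produced by relation "likes"

mean_agg = get_aggregate_fn('mean')
print(mean_agg([h_follows, h_likes], dsttype='user').shape)   # (4, 8): elementwise mean over relations

stack_agg = get_aggregate_fn('stack')
print(stack_agg([h_follows, h_likes], dsttype='user').shape)  # (4, 2, 8): one slot per relation via nd.stack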
Example 13: ten_crop
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def ten_crop(src, size):
    """Crop 10 regions from an array.
    This is performed the same as:
    http://chainercv.readthedocs.io/en/stable/reference/transforms.html#ten-crop

    This method crops 10 regions. All regions will be in shape
    :obj:`size`. These regions consist of 1 center crop and 4 corner
    crops and horizontal flips of them.
    The crops are ordered as follows:

    * center crop
    * top-left crop
    * bottom-left crop
    * top-right crop
    * bottom-right crop
    * center crop (flipped horizontally)
    * top-left crop (flipped horizontally)
    * bottom-left crop (flipped horizontally)
    * top-right crop (flipped horizontally)
    * bottom-right crop (flipped horizontally)

    Parameters
    ----------
    src : mxnet.nd.NDArray
        Input image.
    size : tuple
        Tuple of length 2, as (width, height) of the cropped areas.

    Returns
    -------
    mxnet.nd.NDArray
        The cropped images with shape (10, size[1], size[0], C)
    """
    h, w, _ = src.shape
    ow, oh = size
    if h < oh or w < ow:
        raise ValueError(
            "Cannot crop area {} from image with size ({}, {})".format(str(size), h, w))
    center = src[(h - oh) // 2:(h + oh) // 2, (w - ow) // 2:(w + ow) // 2, :]
    tl = src[0:oh, 0:ow, :]
    bl = src[h - oh:h, 0:ow, :]
    tr = src[0:oh, w - ow:w, :]
    br = src[h - oh:h, w - ow:w, :]
    crops = nd.stack(*[center, tl, bl, tr, br], axis=0)
    crops = nd.concat(*[crops, nd.flip(crops, axis=2)], dim=0)
    return crops
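For instance, cropping ten 224x224 regions from a 256x256 HWC image (random data used here purely for illustration):

from mxnet import nd

img = nd.random.uniform(shape=(256, 256, 3))   # (H, W, C)
crops = ten_crop(img, (224, 224))              # size is (width, height)
print(crops.shape)                             # (10, 224, 224, 3)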
Example 14: decode_centernet
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def decode_centernet(heat, wh, reg=None, cat_spec_wh=False, K=100, flag_split=False):
    batch, cat, height, width = heat.shape
    # perform nms on heatmaps, find the peaks
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)
    if reg is not None:
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = nd.reshape(reg, (batch, K, 2))
        xs = nd.reshape(xs, (batch, K, 1)) + reg[:, :, 0:1]
        ys = nd.reshape(ys, (batch, K, 1)) + reg[:, :, 1:2]
    else:
        xs = nd.reshape(xs, (batch, K, 1)) + 0.5
        ys = nd.reshape(ys, (batch, K, 1)) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)
    if cat_spec_wh:
        wh = nd.reshape(wh, (batch, K, cat, 2))
        clses_ind = nd.reshape(clses, (batch, K, 1, 1))
        clses_ind = nd.stack(clses_ind, clses_ind, axis=3)  # becomes (batch, K, 1, 2)
        clses_ind = clses_ind.astype('int64')
        wh = wh.gather_nd(2, clses_ind)
        wh = nd.reshape(wh, (batch, K, 2))
    else:
        wh = nd.reshape(wh, (batch, K, 2))
    clses = nd.reshape(clses, (batch, K, 1)).astype('float32')
    scores = nd.reshape(scores, (batch, K, 1))
    bboxes = nd.concat(xs - wh[:, :, 0:1] / 2,
                       ys - wh[:, :, 1:2] / 2,
                       xs + wh[:, :, 0:1] / 2,
                       ys + wh[:, :, 1:2] / 2,
                       dim=2)
    if flag_split is True:
        return bboxes, scores, clses
    else:
        detections = nd.concat(bboxes, scores, clses, dim=2)
        return detections
Example 15: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import stack [as alias]
def forward(self, feature, label, begin_states, is_training):
    '''Decode the hidden states into a temporal sequence.

    Parameters
    ----------
    feature: an NDArray with shape [n, d].
    label: an NDArray with shape [n, b, t, d].
    begin_states: a list of hidden states (list of hidden units with shape [n, b, d]) of RNNs.
    is_training: bool

    Returns
    -------
    outputs: the prediction, an NDArray with shape [n, b, t, d]
    '''
    ctx = label.context
    num_nodes, batch_size, seq_len, _ = label.shape
    aux = label[:, :, :, self.output_dim:]     # [n, b, t, d]
    label = label[:, :, :, :self.output_dim]   # [n, b, t, d]

    go = nd.zeros(shape=(num_nodes, batch_size, self.input_dim), ctx=ctx)
    output, states = [], begin_states

    for i in range(seq_len):
        # get the next input (scheduled sampling between ground truth and previous prediction)
        if i == 0:
            data = go
        else:
            prev = nd.concat(output[i - 1], aux[:, :, i - 1], dim=-1)
            truth = nd.concat(label[:, :, i - 1], aux[:, :, i - 1], dim=-1)
            if is_training and self.use_sampling:
                value = self.sampling()
            else:
                value = 0
            data = value * truth + (1 - value) * prev

        # unroll one step
        for depth, cell in enumerate(self.cells):
            data, states[depth] = cell.forward_single(feature, data, states[depth])
            if self.graphs[depth] is not None:
                _data = 0
                for g in self.graphs[depth]:
                    _data = _data + g(data, feature)
                data = _data / len(self.graphs[depth])

        # append the node feature to the output
        _feature = nd.expand_dims(feature, axis=1)                       # [n, 1, d]
        _feature = nd.broadcast_to(_feature, shape=(0, batch_size, 0))   # [n, b, d]
        data = nd.concat(data, _feature, dim=-1)                         # [n, b, d']

        # project the output to the prediction
        data = nd.reshape(data, shape=(num_nodes * batch_size, -1))
        data = self.proj(data)
        data = nd.reshape(data, shape=(num_nodes, batch_size, -1))

        output.append(data)

    output = nd.stack(*output, axis=2)
    return output