This article collects typical usage examples of the mxnet.nd.concat method in Python. If you are wondering how exactly nd.concat works, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore the other methods of the mxnet.nd module.
The 15 code examples of nd.concat shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
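Before diving into the examples, here is a minimal, self-contained sketch (shapes chosen purely for illustration) of what nd.concat does: it joins NDArrays along an existing axis, and a Python list of arrays can be unpacked with *, a pattern most of the examples below rely on.

from mxnet import nd

a = nd.ones((2, 3))
b = nd.zeros((2, 3))
print(nd.concat(a, b, dim=0).shape)    # (4, 3): joined along axis 0
print(nd.concat(a, b, dim=1).shape)    # (2, 6): joined along axis 1

parts = [nd.ones((1, 3)) for _ in range(4)]
print(nd.concat(*parts, dim=0).shape)  # (4, 3): a list of arrays unpacked with *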
Example 1: predict_batch
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def predict_batch(model, ctx, x, n_pred):
    '''
    Parameters
    ----------
    x: mx.ndarray, shape is (batch_size, 1, n_his, num_of_vertices)

    Returns
    ----------
    mx.ndarray, shape is (batch_size, 1, n_pred, num_of_vertices)
    '''
    predicts = []
    for pred_idx in range(n_pred):
        x_input = nd.concat(x, *predicts, dim=2)[:, :, -n_pred:, :]
        predicts.append(model(x_input.as_in_context(ctx))
                        .as_in_context(mx.cpu()))
    return nd.concat(*predicts, dim=2)
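A hypothetical usage sketch, not part of the original code: the toy LastStepModel below simply repeats the last observed time step, which is enough to check that the rolling nd.concat produces an output of shape (batch_size, 1, n_pred, num_of_vertices). The class name and shapes are assumptions for illustration.

import mxnet as mx
from mxnet import nd

class LastStepModel:
    """Toy stand-in for `model`: predicts the next step by copying the last one."""
    def __call__(self, x):
        return x[:, :, -1:, :]

x = nd.random.uniform(shape=(8, 1, 12, 207))         # (batch_size, 1, n_his, num_of_vertices)
out = predict_batch(LastStepModel(), mx.cpu(), x, n_pred=3)
print(out.shape)                                      # (8, 1, 3, 207)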
Example 2: extract_pairwise_multi_position_embedding_nd
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def extract_pairwise_multi_position_embedding_nd(position_mat, feat_dim, wave_length=1000):
    """ Extract multi-class position embedding

    Args:
        position_mat: [num_fg_classes, num_rois, num_rois, 4]
        feat_dim: dimension of embedding feature
        wave_length:

    Returns:
        embedding: [num_fg_classes, num_rois, num_rois, feat_dim]
    """
    feat_range = nd.arange(0, feat_dim / 8)
    dim_mat = nd.broadcast_power(lhs=nd.full((1,), wave_length),
                                 rhs=(8. / feat_dim) * feat_range)
    dim_mat = nd.Reshape(dim_mat, shape=(1, 1, 1, 1, -1))
    position_mat = nd.expand_dims(100.0 * position_mat, axis=4)
    div_mat = nd.broadcast_div(lhs=position_mat, rhs=dim_mat)
    sin_mat = nd.sin(data=div_mat)
    cos_mat = nd.cos(data=div_mat)
    # embedding, [num_fg_classes, num_rois, num_rois, 4, feat_dim/4]
    embedding = nd.concat(sin_mat, cos_mat, dim=4)
    embedding = nd.Reshape(embedding, shape=(0, 0, 0, feat_dim))
    return embedding
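A quick shape check with random inputs (the sizes below are invented): feat_dim should be divisible by 8, since the sine/cosine pairs are built from feat_dim / 8 frequencies over the 4 box coordinates, as the final reshape to feat_dim implies.

from mxnet import nd

position_mat = nd.random.uniform(shape=(3, 5, 5, 4))  # [num_fg_classes, num_rois, num_rois, 4]
embedding = extract_pairwise_multi_position_embedding_nd(position_mat, feat_dim=64)
print(embedding.shape)                                 # (3, 5, 5, 64)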
Example 3: __next__
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def __next__(self):
    balanced_batch_images = []
    balanced_batch_texts = []
    for i, data_loader_iter in enumerate(self.dataloader_iter_list):
        try:
            image, text = next(data_loader_iter)
            balanced_batch_images.append(image)
            balanced_batch_texts.append(text)
        except StopIteration:
            self.dataloader_iter_list[i] = iter(self.data_loader_list[i])
            image, text = next(self.dataloader_iter_list[i])
            balanced_batch_images.append(image)
            balanced_batch_texts.append(text)
        except ValueError:
            pass
    balanced_batch_images = nd.concat(*balanced_batch_images, dim=0)
    balanced_batch_texts = nd.concat(*balanced_batch_texts, dim=0)
    return balanced_batch_images, balanced_batch_texts
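The key step for the balanced batch is the final pair of nd.concat calls, which stack the per-loader batches along the batch axis. A minimal sketch of that step alone, with made-up image shapes (two loaders yielding crops of the same size):

from mxnet import nd

batch_a = nd.random.uniform(shape=(4, 1, 32, 100))   # images from loader A: (N, C, H, W)
batch_b = nd.random.uniform(shape=(4, 1, 32, 100))   # images from loader B
balanced = nd.concat(batch_a, batch_b, dim=0)
print(balanced.shape)                                 # (8, 1, 32, 100)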
Example 4: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def forward(self, x):
    x_t = nd.transpose(x, axes=(0, 2, 1))
    conv3_out = self.conv3(x_t)
    conv5_out = self.conv5(conv3_out) + conv3_out
    conv7_out = self.conv7(conv5_out) + conv5_out
    # conv_out = nd.concat(*[conv3_out, conv5_out, conv7_out], dim=1)
    conv_out = self.conv_drop(conv7_out)
    conv_max_pooled = self.conv_maxpool(conv_out)

    gru_out = self.gru(x)
    gru_out_t = nd.transpose(gru_out, axes=(0, 2, 1))
    # gru_pooled = nd.transpose(gru_out, axes=(0, 2, 1))
    # gru_maxpooled = self.gru_post_max(gru_out_t)
    # return gru_maxpooled
    # gru_avepooled = self.gru_post_ave(gru_out_t)
    # gru_pooled = nd.concat(*[gru_maxpooled, gru_avepooled], dim=1)
    gru_maxpooled = self.gru_maxpool(gru_out_t)
    # gru_avepooled = self.gru_maxpool(gru_out_t)
    # gru_pooled = nd.concat(*[gru_maxpooled, gru_avepooled], dim=1)
    # conv_ave_pooled = self.conv_avepool(conv_out)

    concated_feature = nd.concat(*[gru_maxpooled, conv_max_pooled], dim=1)
    return concated_feature
Example 5: msg_edge
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def msg_edge(self, edge):
    state = nd.concat(edge.src['state'], edge.dst['state'], dim=-1)
    feature = nd.concat(edge.src['feature'], edge.dst['feature'], edge.data['dist'], dim=-1)

    # generate the weight via the meta-learner
    weight = self.w_mlp(feature)
    weight = nd.reshape(weight, shape=(-1, self.hidden_size * 2, self.hidden_size))

    # reshape state to [n, b * t, d] for batch_dot (currently mxnet only supports batch_dot for 3D tensors)
    shape = state.shape
    state = nd.reshape(state, shape=(shape[0], -1, shape[-1]))

    alpha = nd.LeakyReLU(nd.batch_dot(state, weight))

    # reshape alpha to [n, b, t, d]
    alpha = nd.reshape(alpha, shape=shape[:-1] + (self.hidden_size,))
    return {'alpha': alpha, 'state': edge.src['state']}
Example 6: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def forward(self, samples, matches, anchors, refs):
    """Forward"""
    F = nd
    # TODO(zhreshold): batch_pick, take multiple elements?
    ref_boxes = nd.repeat(refs.reshape((0, 1, -1, 4)), axis=1, repeats=matches.shape[1])
    ref_boxes = nd.split(ref_boxes, axis=-1, num_outputs=4, squeeze_axis=True)
    ref_boxes = nd.concat(*[F.pick(ref_boxes[i], matches, axis=2).reshape((0, -1, 1))
                            for i in range(4)], dim=2)
    g = self.corner_to_center(ref_boxes)
    a = self.corner_to_center(anchors)
    t0 = ((g[0] - a[0]) / a[2] - self._means[0]) / self._stds[0]
    t1 = ((g[1] - a[1]) / a[3] - self._means[1]) / self._stds[1]
    t2 = (F.log(g[2] / a[2]) - self._means[2]) / self._stds[2]
    t3 = (F.log(g[3] / a[3]) - self._means[3]) / self._stds[3]
    codecs = F.concat(t0, t1, t2, t3, dim=2)
    temp = F.tile(samples.reshape((0, -1, 1)), reps=(1, 1, 4)) > 0.5
    targets = F.where(temp, codecs, F.zeros_like(codecs))
    masks = F.where(temp, F.ones_like(temp), F.zeros_like(temp))
    return targets, masks
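For reference, the per-coordinate transform computed above is the standard center/size box encoding. The sketch below reproduces it for a single ground-truth box g and anchor a already in center format (cx, cy, w, h), assuming zero means and unit stds; the numbers are invented, and corner_to_center, samples and matches are not involved here.

from mxnet import nd

g = nd.array([50., 60., 20., 40.])   # ground-truth box: cx, cy, w, h
a = nd.array([48., 64., 16., 32.])   # anchor box:       cx, cy, w, h

t = nd.concat((g[0:1] - a[0:1]) / a[2:3],   # t0 = (gx - ax) / aw
              (g[1:2] - a[1:2]) / a[3:4],   # t1 = (gy - ay) / ah
              nd.log(g[2:3] / a[2:3]),      # t2 = log(gw / aw)
              nd.log(g[3:4] / a[3:4]),      # t3 = log(gh / ah)
              dim=0)
print(t)   # [0.125, -0.125, ~0.223, ~0.223]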
Example 7: test_layer_bidirectional
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def test_layer_bidirectional():
    class RefBiLSTM(gluon.Block):
        def __init__(self, size, **kwargs):
            super(RefBiLSTM, self).__init__(**kwargs)
            with self.name_scope():
                self._lstm_fwd = gluon.rnn.LSTM(size, bidirectional=False, prefix='l0')
                self._lstm_bwd = gluon.rnn.LSTM(size, bidirectional=False, prefix='r0')

        def forward(self, inpt):
            fwd = self._lstm_fwd(inpt)
            bwd_inpt = nd.flip(inpt, 0)
            bwd = self._lstm_bwd(bwd_inpt)
            bwd = nd.flip(bwd, 0)
            return nd.concat(fwd, bwd, dim=2)

    size = 7
    in_size = 5
    weights = {}
    for d in ['l', 'r']:
        weights['lstm_{}0_i2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, in_size))
        weights['lstm_{}0_h2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, size))
        weights['lstm_{}0_i2h_bias'.format(d)] = mx.random.uniform(shape=(size*4,))
        weights['lstm_{}0_h2h_bias'.format(d)] = mx.random.uniform(shape=(size*4,))

    net = gluon.rnn.LSTM(size, bidirectional=True, prefix='lstm_')
    ref_net = RefBiLSTM(size, prefix='lstm_')
    net.initialize()
    ref_net.initialize()
    net_params = net.collect_params()
    ref_net_params = ref_net.collect_params()
    for k in weights:
        net_params[k].set_data(weights[k])
        ref_net_params[k.replace('l0', 'l0l0').replace('r0', 'r0l0')].set_data(weights[k])

    data = mx.random.uniform(shape=(3, 10, in_size))
    assert_allclose(net(data).asnumpy(), ref_net(data).asnumpy())
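What the reference network verifies can also be seen in a standalone sketch (sizes assumed): run a forward LSTM, run a second LSTM on the time-flipped sequence, flip its output back, and nd.concat the two along the feature axis; the result has hidden size 2*size, matching the fused bidirectional layer.

from mxnet import gluon, nd

lstm_fwd = gluon.rnn.LSTM(7, bidirectional=False)
lstm_bwd = gluon.rnn.LSTM(7, bidirectional=False)
lstm_fwd.initialize()
lstm_bwd.initialize()

x = nd.random.uniform(shape=(10, 3, 5))      # (seq_len, batch, in_size), layout 'TNC'
fwd = lstm_fwd(x)
bwd = nd.flip(lstm_bwd(nd.flip(x, 0)), 0)
print(nd.concat(fwd, bwd, dim=2).shape)      # (10, 3, 14)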
Example 8: bbox_improve
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def bbox_improve(bbox):
    '''bbox encoding'''
    area = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])
    return nd.concat(bbox, area.expand_dims(1))
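A quick check with made-up corner boxes: nd.concat defaults to dim=1 here, so the box area becomes a fifth column appended to each row.

from mxnet import nd

bbox = nd.array([[0, 0, 10, 20],
                 [5, 5, 15, 25]])    # (num_boxes, 4), corner format x1, y1, x2, y2
print(bbox_improve(bbox))            # (num_boxes, 5): x1, y1, x2, y2, area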
Example 9: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def forward(self, edges):
    feat = nd.concat(edges.src['pred_bbox'], edges.dst['pred_bbox'],
                     edges.data['rel_bbox'], edges.data['pred_bbox_additional'])
    out = self.mlp(feat)
    return {'spatial': out}
Example 10: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def forward(self, graph, feat):
    r"""Compute set2set pooling.

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    feat : mxnet.NDArray
        The input feature with shape :math:`(N, D)` where
        :math:`N` is the number of nodes in the graph.

    Returns
    -------
    mxnet.NDArray
        The output feature with shape :math:`(B, D)`, where
        :math:`B` refers to the batch size.
    """
    with graph.local_scope():
        batch_size = graph.batch_size

        h = (nd.zeros((self.n_layers, batch_size, self.input_dim), ctx=feat.context),
             nd.zeros((self.n_layers, batch_size, self.input_dim), ctx=feat.context))
        q_star = nd.zeros((batch_size, self.output_dim), ctx=feat.context)

        for _ in range(self.n_iters):
            q, h = self.lstm(q_star.expand_dims(axis=0), h)
            q = q.reshape((batch_size, self.input_dim))
            e = (feat * broadcast_nodes(graph, q)).sum(axis=-1, keepdims=True)
            graph.ndata['e'] = e
            alpha = softmax_nodes(graph, 'e')
            graph.ndata['r'] = feat * alpha
            readout = sum_nodes(graph, 'r')
            q_star = nd.concat(q, readout, dim=-1)

        return q_star
Example 11: validate
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def validate(val_data, val_dataset, net, ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]

    val_metric.reset()

    from tqdm import tqdm
    for batch in tqdm(val_data):
        data, scale, center, score, imgid = val_batch_fn(batch, ctx)

        outputs = [net(X) for X in data]
        if opt.flip_test:
            data_flip = [nd.flip(X, axis=3) for X in data]
            outputs_flip = [net(X) for X in data_flip]
            outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True)
                                for o in outputs_flip]
            outputs = [(o + o_flip) / 2 for o, o_flip in zip(outputs, outputs_flipback)]

        if opt.dsnt:
            outputs = [net_dsnt(X)[0] for X in outputs]

        if len(outputs) > 1:
            outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
        else:
            outputs_stack = outputs[0].as_in_context(mx.cpu())

        if opt.dsnt:
            preds = (outputs_stack - 0.5) * scale.expand_dims(axis=1) + center.expand_dims(axis=1)
            maxvals = nd.ones(preds.shape[0:2] + (1,))
        else:
            preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
        val_metric.update(preds, maxvals, score, imgid)

    metric_name, metric_score = val_metric.get()
    print("Inference Completed! %s = %.4f" % (metric_name, metric_score))
    return
Example 12: validate
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def validate(val_data, val_dataset, net, ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]

    val_metric.reset()

    from tqdm import tqdm
    for batch in tqdm(val_data):
        # data, scale, center, score, imgid = val_batch_fn(batch, ctx)
        data, scale_box, score, imgid = val_batch_fn(batch, ctx)

        outputs = [net(X) for X in data]
        if opt.flip_test:
            data_flip = [nd.flip(X, axis=3) for X in data]
            outputs_flip = [net(X) for X in data_flip]
            outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True)
                                for o in outputs_flip]
            outputs = [(o + o_flip) / 2 for o, o_flip in zip(outputs, outputs_flipback)]

        if len(outputs) > 1:
            outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
        else:
            outputs_stack = outputs[0].as_in_context(mx.cpu())

        # preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
        preds, maxvals = heatmap_to_coord_alpha_pose(outputs_stack, scale_box)
        # print(preds, maxvals, scale_box)
        val_metric.update(preds, maxvals, score, imgid)

    res = val_metric.get()
    return
Example 13: validate
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def validate(val_data, val_dataset, net, ctx, opt):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]

    val_metric = COCOKeyPointsMetric(val_dataset, 'coco_keypoints',
                                     in_vis_thresh=0)

    for batch in tqdm(val_data, dynamic_ncols=True):
        # data, scale, center, score, imgid = val_batch_fn(batch, ctx)
        data, scale_box, score, imgid = val_batch_fn(batch, ctx)

        outputs = [net(X) for X in data]
        if opt.flip_test:
            data_flip = [nd.flip(X, axis=3) for X in data]
            outputs_flip = [net(X) for X in data_flip]
            outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True)
                                for o in outputs_flip]
            outputs = [(o + o_flip) / 2 for o, o_flip in zip(outputs, outputs_flipback)]

        if len(outputs) > 1:
            outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
        else:
            outputs_stack = outputs[0].as_in_context(mx.cpu())

        # preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
        preds, maxvals = heatmap_to_coord_alpha_pose(outputs_stack, scale_box)
        val_metric.update(preds, maxvals, score, imgid)

    nullwriter = NullWriter()
    oldstdout = sys.stdout
    sys.stdout = nullwriter
    try:
        res = val_metric.get()
    finally:
        sys.stdout = oldstdout
    return res
Example 14: _slice
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def _slice(self, x, num_anchors, num_offsets):
    """Since some stages won't see all of the anchors, we have to slice out the correct targets."""
    # x with shape (B, N, A, 1 or 2)
    anchors = [0] + num_anchors.tolist()
    offsets = [0] + num_offsets.tolist()
    ret = []
    for i in range(len(num_anchors)):
        y = x[:, offsets[i]:offsets[i+1], anchors[i]:anchors[i+1], :]
        ret.append(y.reshape((0, -3, -1)))
    return nd.concat(*ret, dim=1)
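A hypothetical shape check. In the caller (not shown here), num_anchors and num_offsets appear to be cumulative counts per stage, so they are treated that way below; the stage sizes are invented, and since `self` is unused in the body, None is passed purely for illustration.

import numpy as np
from mxnet import nd

num_anchors = np.array([3, 6, 9])            # cumulative anchors per stage: 3, 3, 3
num_offsets = np.array([64, 80, 84])         # cumulative grid cells per stage: 64, 16, 4
x = nd.random.uniform(shape=(2, 84, 9, 2))   # (B, total offsets, total anchors, 2)

print(_slice(None, x, num_anchors, num_offsets).shape)   # (2, 252, 2) = (B, 64*3 + 16*3 + 4*3, 2)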
Example 15: get_final_preds
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import concat [as alias]
def get_final_preds(batch_heatmaps, center, scale):
    coords, maxvals = get_max_pred(batch_heatmaps)

    heatmap_height = batch_heatmaps.shape[2]
    heatmap_width = batch_heatmaps.shape[3]

    # post-processing
    for n in range(coords.shape[0]):
        for p in range(coords.shape[1]):
            hm = batch_heatmaps[n][p]
            px = int(nd.floor(coords[n][p][0] + 0.5).asscalar())
            py = int(nd.floor(coords[n][p][1] + 0.5).asscalar())
            if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
                diff = nd.concat(hm[py][px+1] - hm[py][px-1],
                                 hm[py+1][px] - hm[py-1][px],
                                 dim=0)
                coords[n][p] += nd.sign(diff) * .25

    preds = nd.zeros_like(coords)
    # Transform back
    for i in range(coords.shape[0]):
        preds[i] = transform_preds(coords[i], center[i], scale[i],
                                   [heatmap_width, heatmap_height])
    return preds, maxvals
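To see the quarter-pixel refinement in isolation, the sketch below uses an invented 5x5 heatmap with its integer peak at (2, 2): nd.concat packs the horizontal and vertical gradients into one small array so nd.sign can nudge each coordinate by 0.25 toward the higher-valued neighbour.

from mxnet import nd

hm = nd.array([[0.0, 0.1, 0.1, 0.1, 0.0],
               [0.1, 0.2, 0.4, 0.2, 0.1],
               [0.1, 0.3, 0.9, 0.6, 0.1],
               [0.1, 0.2, 0.5, 0.2, 0.1],
               [0.0, 0.1, 0.1, 0.1, 0.0]])
px, py = 2, 2                                        # integer peak location (x, y)
diff = nd.concat(hm[py][px + 1] - hm[py][px - 1],    # horizontal gradient
                 hm[py + 1][px] - hm[py - 1][px],    # vertical gradient
                 dim=0)
print(nd.sign(diff) * .25)                           # [0.25 0.25]: shift toward the larger neighbours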