This article collects typical usage examples of the mxnet.nd.pick method in Python. If you are wondering what nd.pick does, how to call it, or what example code looks like, the curated examples here may help. You can also explore further usage examples of the containing module, mxnet.nd.
The following shows 7 code examples of the nd.pick method.
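Before diving into the examples, here is a minimal standalone sketch of nd.pick's semantics (this snippet is illustrative and not taken from any of the examples below): along the given axis, pick selects one element per slice according to the supplied index.

from mxnet import nd

x = nd.array([[0.1, 0.7, 0.2],
              [0.5, 0.3, 0.2]])
idx = nd.array([1, 0])  # one index per row
print(nd.pick(x, idx, axis=1))                 # [0.7 0.5]
print(nd.pick(x, idx, axis=1, keepdims=True))  # shape (2, 1)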
Example 1: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def hybrid_forward(self, F, X, y=None):
    X = self.net[0](X)  # Conv1
    X = self.net[1](X)  # Primary capsule
    X = self.net[2](X)  # Digit capsule
    X = X.reshape((X.shape[0], X.shape[2], X.shape[4]))
    # get length of each capsule vector for margin loss calculation
    X_l2norm = nd.sqrt((X ** 2).sum(axis=-1))
    prob = nd.softmax(X_l2norm, axis=-1)
    if y is not None:
        max_len_indices = y
    else:
        max_len_indices = nd.argmax(prob, axis=-1)
    # tile the chosen class index across the capsule dimension, then pick the
    # activated capsule vector per sample (use max_len_indices rather than y,
    # so the inference path with y=None also works)
    y_tile = nd.tile(max_len_indices.expand_dims(axis=1), reps=(1, X.shape[-1]))
    batch_activated_capsules = nd.pick(X, y_tile, axis=1, keepdims=True)
    reconstructions = self.net[3](batch_activated_capsules)
    return prob, X_l2norm, reconstructions
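The pick call above selects, per sample, the capsule vector of the chosen class. A toy sketch of that indexing pattern (the shapes here are assumptions for illustration):

from mxnet import nd

batch, num_caps, dim = 2, 3, 4
X = nd.arange(batch * num_caps * dim).reshape((batch, num_caps, dim))
y = nd.array([2, 0])  # one class index per sample
y_tile = nd.tile(y.expand_dims(axis=1), reps=(1, dim))  # (batch, dim)
picked = nd.pick(X, y_tile, axis=1, keepdims=True)      # (batch, 1, dim)
# picked[i, 0, :] equals X[i, y[i], :]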
Example 2: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def hybrid_forward(self, F, pred, label):
    """Compute loss"""
    softmaxout = F.SoftmaxOutput(
        pred, label.astype(pred.dtype), ignore_label=self._ignore_label,
        multi_output=self._sparse_label,
        use_ignore=True, normalization='valid' if self._size_average else 'null')
    if self._sparse_label:
        loss = -F.pick(F.log(softmaxout), label, axis=1, keepdims=True)
    else:
        label = _reshape_like(F, label, pred)
        loss = -F.sum(F.log(softmaxout) * label, axis=-1, keepdims=True)
    loss = F.where(label.expand_dims(axis=1) == self._ignore_label,
                   F.zeros_like(loss), loss)
    return F.mean(loss, axis=self._batch_axis, exclude=True)
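In the sparse-label branch, pick extracts the log-probability of each position's ground-truth class. A standalone sketch with assumed segmentation-style shapes (this is not the class above, just the core pick pattern):

from mxnet import nd

pred = nd.random.normal(shape=(2, 5, 4, 4))  # (batch, classes, H, W) logits
label = nd.random.randint(0, 5, shape=(2, 4, 4)).astype('float32')
log_prob = nd.log_softmax(pred, axis=1)
loss = -nd.pick(log_prob, label, axis=1, keepdims=True)  # (2, 1, 4, 4)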
Example 3: _mixup_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def _mixup_forward(self, F, pred, label1, label2, lam, sample_weight=None):
    if not self._from_logits:
        pred = F.log_softmax(pred, self._axis)
    if self._sparse_label:
        loss1 = -F.pick(pred, label1, axis=self._axis, keepdims=True)
        loss2 = -F.pick(pred, label2, axis=self._axis, keepdims=True)
        loss = lam * loss1 + (1 - lam) * loss2
    else:
        label1 = _reshape_like(F, label1, pred)
        label2 = _reshape_like(F, label2, pred)
        loss1 = -F.sum(pred * label1, axis=self._axis, keepdims=True)
        loss2 = -F.sum(pred * label2, axis=self._axis, keepdims=True)
        loss = lam * loss1 + (1 - lam) * loss2
    loss = _apply_weighting(F, loss, self._weight, sample_weight)
    return F.mean(loss, axis=self._batch_axis, exclude=True)
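For sparse labels, the mixup loss is simply a lam-weighted blend of two picked negative log-likelihoods. A toy sketch (shapes and values are assumptions):

from mxnet import nd

pred = nd.log_softmax(nd.random.normal(shape=(4, 10)), axis=-1)
label1 = nd.array([1, 2, 3, 4])
label2 = nd.array([5, 6, 7, 8])
lam = 0.7
loss = -(lam * nd.pick(pred, label1, axis=-1, keepdims=True)
         + (1 - lam) * nd.pick(pred, label2, axis=-1, keepdims=True))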
Example 4: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def hybrid_forward(self, F, pred, label):
    """Compute loss"""
    softmaxout = F.SoftmaxOutput(
        pred, label.astype(pred.dtype), ignore_label=self._ignore_label,
        multi_output=self._sparse_label,
        use_ignore=True, normalization='valid' if self._size_average else 'null')
    loss = -F.pick(F.log(softmaxout), label, axis=1, keepdims=True)
    loss = F.where(label.expand_dims(axis=1) == self._ignore_label,
                   F.zeros_like(loss), loss)
    return F.mean(loss, axis=self._batch_axis, exclude=True)
Example 5: _gather_feat
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def _gather_feat(feat, ind, mask=None):
    # feat: (batch, N, attri_dim), ind: (batch, K); mask is unused here
    # K cannot be 1 for this implementation
    K = ind.shape[1]
    batch_size = ind.shape[0]
    for i in range(batch_size):
        if i == 0:
            output = feat[i, ind[i]].expand_dims(2)  # similar to nd.pick
        else:
            output = nd.concat(output, feat[i, ind[i]].expand_dims(2), dim=2)
    output = output.swapaxes(dim1=1, dim2=2)
    return output
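The loop above gathers K feature rows per batch element. A hedged alternative sketch using nd.gather_nd, which does the batched gather in a single call (and also handles K == 1); the shapes here are assumptions for illustration:

from mxnet import nd

B, N, D, K = 2, 5, 3, 4
feat = nd.random.normal(shape=(B, N, D))
ind = nd.random.randint(0, N, shape=(B, K)).astype('float32')
batch_idx = nd.arange(B).reshape((B, 1)).broadcast_to((B, K))
out = nd.gather_nd(feat, nd.stack(batch_idx, ind))  # (B, K, D)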
Example 6: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def forward(self, cls_pred, box_pred, cls_target, box_target):
    """Compute loss in entire batch across devices."""
    # require results across different devices at this time
    cls_pred, box_pred, cls_target, box_target = [
        _as_list(x) for x in (cls_pred, box_pred, cls_target, box_target)]
    # cross device reduction to obtain positive samples in entire batch
    num_pos = []
    for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
        pos_samples = (ct > 0)
        num_pos.append(pos_samples.sum())
    num_pos_all = sum([p.asscalar() for p in num_pos])
    if num_pos_all < 1:
        # no positive samples found, return dummy losses
        return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))
    # compute element-wise cross entropy loss and sort, then perform negative mining
    cls_losses = []
    box_losses = []
    sum_losses = []
    for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
        pred = nd.log_softmax(cp, axis=-1)
        pos = ct > 0
        cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
        rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
        hard_negative = rank < (pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
        # mask out anchors that are neither positive nor hard negative
        cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
        cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / num_pos_all)
        bp = _reshape_like(nd, bp, bt)
        box_loss = nd.abs(bp - bt)
        box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                            (0.5 / self._rho) * nd.square(box_loss))
        # box loss applies only to positive samples
        box_loss = box_loss * pos.expand_dims(axis=-1)
        box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / num_pos_all)
        sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])
    return sum_losses, cls_losses, box_losses
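The double argsort above is a compact way to rank anchors: negatives are ordered by descending cross-entropy loss while positives are pushed past every negative, so `rank < num_pos * ratio` keeps only the hardest negatives. A toy walk-through:

from mxnet import nd

cls_loss = nd.array([[0.9, 0.1, 2.0, 0.4]])
pos = nd.array([[1, 0, 0, 0]])  # anchor 0 is positive
rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
# rank == [[3, 2, 0, 1]]: the hardest negative (loss 2.0) gets rank 0,
# and the positive is ranked last so it can never be chosen as a hard negative
hard_negative = rank < (pos.sum(axis=1) * 3).expand_dims(-1)  # ratio = 3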
Example 7: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import pick [as alias]
def forward(self, cls_pred, box_pred, cls_target, box_target):
    """Compute loss in entire batch across devices."""
    # require results across different devices at this time
    cls_pred, box_pred, cls_target, box_target = [
        _as_list(x) for x in (cls_pred, box_pred, cls_target, box_target)]
    # cross device reduction to obtain positive samples in entire batch
    num_pos = []
    for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
        pos_samples = (ct > 0)
        num_pos.append(pos_samples.sum())
    num_pos_all = sum([p.asscalar() for p in num_pos])
    if num_pos_all < 1 and self._min_hard_negatives < 1:
        # no positive samples and no hard negatives, return dummy losses
        cls_losses = [nd.sum(cp * 0) for cp in cls_pred]
        box_losses = [nd.sum(bp * 0) for bp in box_pred]
        sum_losses = [nd.sum(cp * 0) + nd.sum(bp * 0) for cp, bp in zip(cls_pred, box_pred)]
        return sum_losses, cls_losses, box_losses
    # compute element-wise cross entropy loss and sort, then perform negative mining
    cls_losses = []
    box_losses = []
    sum_losses = []
    for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
        pred = nd.log_softmax(cp, axis=-1)
        pos = ct > 0
        cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
        rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
        hard_negative = rank < nd.maximum(
            self._min_hard_negatives,
            pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
        # mask out anchors that are neither positive nor hard negative
        cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
        cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / max(1., num_pos_all))
        bp = _reshape_like(nd, bp, bt)
        box_loss = nd.abs(bp - bt)
        box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                            (0.5 / self._rho) * nd.square(box_loss))
        # box loss applies only to positive samples
        box_loss = box_loss * pos.expand_dims(axis=-1)
        box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / max(1., num_pos_all))
        sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])
    return sum_losses, cls_losses, box_losses
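Compared with Example 6, this variant mines at least self._min_hard_negatives negatives even when a batch contains no positive samples, and it normalizes by max(1., num_pos_all) so the loss never divides by zero.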