This article collects typical usage examples of the mxnet.ndarray.mean method in Python. If you are unsure how ndarray.mean is used or what it is good for, the curated code examples below may help. You can also explore further usage of the module it belongs to, mxnet.ndarray.
Six code examples of ndarray.mean are shown below, sorted by popularity by default.
Example 1: mean
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import mean [as alias]
def mean(input, dim):
    return nd.mean(input, axis=dim)
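For context, a minimal usage sketch of this wrapper (the input array is invented for illustration; nd is mxnet.ndarray):

from mxnet import ndarray as nd

x = nd.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])
# reduce over axis 0 (down the rows): yields [2.5, 3.5, 4.5]
print(mean(x, dim=0))
# nd.mean with no axis reduces over all elements: yields [3.5]
print(nd.mean(x))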
Example 2: reduce_mean
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import mean [as alias]
def reduce_mean(input):
    return input.mean()
Example 3: unsorted_1d_segment_mean
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import mean [as alias]
# (also relies on numpy as np and on unsorted_1d_segment_sum from the same module)
def unsorted_1d_segment_mean(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment mean on first dimension'
    # count how many rows fall into each segment, clipping to >= 1 to avoid division by zero
    n_ones = nd.ones_like(seg_id).astype(input.dtype)
    w = unsorted_1d_segment_sum(n_ones, seg_id, n_segs, 0)
    w = nd.clip(w, a_min=1, a_max=np.inf)
    # segment-wise sum divided by the per-segment counts gives the segment mean
    y = unsorted_1d_segment_sum(input, seg_id, n_segs, dim)
    y = y / w.reshape((-1,) + (1,) * (y.ndim - 1))
    return y
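A hedged usage sketch for this helper, assuming unsorted_1d_segment_sum from the same backend module is in scope; the rows and segment ids below are invented for illustration:

from mxnet import ndarray as nd

# rows 0 and 2 belong to segment 0, row 1 to segment 1
x = nd.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])
seg_id = nd.array([0, 1, 0], dtype='int64')
y = unsorted_1d_segment_mean(x, seg_id, n_segs=2, dim=0)
# expected output:
# [[3.0, 4.0],   mean of rows 0 and 2
#  [3.0, 4.0]]   row 1 on its own
print(y)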
Example 4: backward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import mean [as alias]
def backward(self, grad_out):
    lhs_data_nd, rhs_data_nd, out_data_nd, feat_shape, degs = self.saved_tensors
    # for the mean reducer, the forward pass ran a sum reduce; rescale the incoming
    # gradient by the saved degrees before handing it to the kernels
    if self.reducer == 'mean':
        grad_out = grad_out / degs
    grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
    grad_lhs = nd.empty((lhs_data_nd.shape[0],) + feat_shape,
                        ctx=grad_out.context, dtype=grad_out.dtype)
    K.backward_lhs_binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, grad_out_nd,
        zerocopy_to_dgl_ndarray_for_write(grad_lhs), self.lhs_map[1],
        self.rhs_map[1], self.out_map[1])
    grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)
    grad_rhs = nd.empty((rhs_data_nd.shape[0],) + feat_shape,
                        ctx=grad_out.context, dtype=grad_out.dtype)
    K.backward_rhs_binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, grad_out_nd,
        zerocopy_to_dgl_ndarray_for_write(grad_rhs), self.lhs_map[1],
        self.rhs_map[1], self.out_map[1])
    grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)
    # clear saved tensors explicitly
    self.saved_tensors = None
    return grad_lhs, grad_rhs
Example 5: hybrid_forward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import mean [as alias]
def hybrid_forward(self, F, input_logits, target_logits, sample_weight=None):
    # softmax-MSE consistency loss: squared difference of the two softmax distributions,
    # averaged over every axis except the batch axis
    input_softmax = F.softmax(input_logits, axis=1)
    target_softmax = F.softmax(target_logits, axis=1)
    loss = F.square(input_softmax - target_softmax)
    return F.mean(loss, axis=self._batch_axis, exclude=True)
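To see what this loss computes outside of a Gluon block, here is a minimal standalone sketch that mirrors the same expression with nd, assuming the batch axis is 0; the logits are invented:

from mxnet import ndarray as nd

input_logits = nd.array([[2.0, 0.5, -1.0]])
target_logits = nd.array([[1.5, 0.8, -0.5]])
input_softmax = nd.softmax(input_logits, axis=1)
target_softmax = nd.softmax(target_logits, axis=1)
loss = nd.square(input_softmax - target_softmax)
# average over every axis except the batch axis (axis 0): one loss value per sample
print(nd.mean(loss, axis=0, exclude=True))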
Example 6: forward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import mean [as alias]
def forward(self, lhs_data, rhs_data):
    lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
    rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
    feat_shape = K.infer_binary_feature_shape(self.binary_op, lhs_data_nd, rhs_data_nd)
    out_shape = feat_shape
    if self.binary_op == 'dot':
        out_shape = feat_shape[:-1]
    out_data = nd.empty((self.out_size,) + out_shape,
                        ctx=lhs_data.context, dtype=lhs_data.dtype)
    out_data_nd = zerocopy_to_dgl_ndarray_for_write(out_data)
    # run the reduce as 'sum' even for 'mean'; the normalization happens below
    K.binary_op_reduce(
        self.reducer if self.reducer != 'mean' else 'sum',
        self.binary_op, self.graph, self.lhs, self.rhs,
        lhs_data_nd, rhs_data_nd, out_data_nd, self.lhs_map[0],
        self.rhs_map[0], self.out_map[0])
    # normalize if mean reducer
    # NOTE(zihao): this is a temporary hack and we should have better solution in the future.
    if self.reducer == 'mean':
        degs = nd.empty((out_data.shape[0],),
                        ctx=out_data.context, dtype=out_data.dtype)
        degs_nd = zerocopy_to_dgl_ndarray(degs)
        if self.lhs != TargetCode.DST:
            target = self.lhs
            n = lhs_data.shape[0]
            in_map = self.lhs_map[0]
        else:
            target = self.rhs
            n = rhs_data.shape[0]
            in_map = self.rhs_map[0]
        # count the contributions per output entry by sum-reducing a vector of ones
        in_ones = nd.ones((n,), ctx=lhs_data.context, dtype=lhs_data.dtype)
        in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
        K.copy_reduce(
            'sum', self.graph, target, in_ones_nd, degs_nd,
            in_map, self.out_map[0])
        # reshape for broadcasting and clip to >= 1 before dividing
        degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.ndim - 1)).clip(1, float('inf'))
        out_data = out_data / degs
    else:
        degs = None
    self.save_for_backward(lhs_data_nd, rhs_data_nd, out_data_nd,
                           feat_shape, degs)
    return out_data
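The mean reducer above is thus emulated as a sum reduce followed by a division by per-node counts, clipped to at least 1 so entries that received no messages are not divided by zero. A plain-nd sketch of that normalization step, with invented sums and degrees:

from mxnet import ndarray as nd

# summed messages per output node and how many messages each node received
summed = nd.array([[4.0, 6.0],
                   [0.0, 0.0],
                   [3.0, 3.0]])
degs = nd.array([2.0, 0.0, 1.0])
# clip degrees to >= 1, then broadcast-divide to turn sums into means
degs = degs.reshape((-1, 1)).clip(1, float('inf'))
print(summed / degs)  # [[2. 3.] [0. 0.] [3. 3.]]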