This article compiles typical usage examples of Python's mxnet.ndarray.NDArray: what the method does, how to call it, and how it is used in real projects. You can also explore the containing module, mxnet.ndarray, for related functionality.
The nine code examples below show ndarray.NDArray in use, ordered roughly by popularity.
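Before the project examples, here is a minimal, self-contained sketch of the NDArray basics the snippets below rely on (array creation, numpy round-trips, and the isinstance check against NDArray); the array values are illustrative only:

import numpy as np
import mxnet as mx
from mxnet.ndarray import NDArray

# Create an NDArray and convert to/from numpy.
x = mx.nd.ones((2, 3))   # dense NDArray on the default context (cpu)
a = x.asnumpy()          # NDArray -> numpy.ndarray
y = mx.nd.array(a)       # numpy.ndarray -> NDArray

# The type check used throughout the examples below.
assert isinstance(y, NDArray)
assert np.all(y.asnumpy() == a)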
Example 1: test_sparse_nd_setitem
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def test_sparse_nd_setitem():
    def check_sparse_nd_setitem(stype, shape, dst):
        x = mx.nd.zeros(shape=shape, stype=stype)
        x[:] = dst
        dst_nd = mx.nd.array(dst) if isinstance(dst, (np.ndarray, np.generic)) else dst
        assert np.all(x.asnumpy() == dst_nd.asnumpy() if isinstance(dst_nd, NDArray) else dst)

    shape = rand_shape_2d()
    for stype in ['row_sparse', 'csr']:
        # ndarray assignment
        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
        # numpy assignment
        check_sparse_nd_setitem(stype, shape, np.ones(shape))
        # scalar assigned to row_sparse NDArray
        check_sparse_nd_setitem('row_sparse', shape, 2)
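To run this snippet standalone, the following imports would be needed; rand_shape_2d and rand_ndarray are test helpers that live in mxnet.test_utils (stated here as an assumption about where this test was sourced from):

import numpy as np
import mxnet as mx
from mxnet.ndarray import NDArray
from mxnet.test_utils import rand_shape_2d, rand_ndarray  # assumed source of the helpers

test_sparse_nd_setitem()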
Example 2: test_create_sparse_nd_from_dense
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def test_create_sparse_nd_from_dense():
    def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
        arr = f(dense_arr, dtype=dtype, ctx=ctx)
        assert same(arr.asnumpy(), np.ones(shape))
        assert arr.dtype == dtype
        assert arr.context == ctx
        # verify the default dtype inferred from the dense array
        arr2 = f(dense_arr)
        assert arr2.dtype == default_dtype
        assert arr2.context == Context.default_ctx

    shape = rand_shape_2d()
    dtype = np.int32
    src_dtype = np.float64
    ctx = mx.cpu(1)
    dense_arrs = [mx.nd.ones(shape, dtype=src_dtype),
                  np.ones(shape, dtype=src_dtype),
                  np.ones(shape, dtype=src_dtype).tolist()]
    for f in [mx.nd.sparse.csr_matrix, mx.nd.sparse.row_sparse_array]:
        for dense_arr in dense_arrs:
            # dtype defaults to that of the source array, or float32 for plain lists
            default_dtype = dense_arr.dtype if isinstance(dense_arr, (NDArray, np.ndarray)) \
                            else np.float32
            check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx)
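As a quick illustration of what this test exercises, both sparse constructors accept a dense source and infer the dtype from it; a minimal sketch (values are arbitrary):

import numpy as np
import mxnet as mx

dense = np.ones((3, 4), dtype=np.float64)
csr = mx.nd.sparse.csr_matrix(dense)                  # dtype inferred from source: float64
rsp = mx.nd.sparse.row_sparse_array(dense.tolist())   # plain list: dtype defaults to float32
assert csr.dtype == np.float64
assert rsp.dtype == np.float32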
Example 3: get_params
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : list of NDArray
        Target parameter arrays.
    aux_params : list of NDArray
        Target aux arrays.

    Notes
    -----
    - This function will update the NDArrays in `arg_params` and `aux_params` in place.
    """
    # `ctx` here refers to the mxnet context module; each parameter is
    # averaged across devices on the CPU before being copied into the target.
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
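The averaging step above is just an element-wise mean of per-device copies; a minimal standalone sketch of the same reduction (the device values are illustrative):

import mxnet as mx

# Pretend these are copies of one parameter living on different devices.
block = [mx.nd.full((2, 2), 1.0), mx.nd.full((2, 2), 3.0)]
weight = sum(w.copyto(mx.cpu()) for w in block) / len(block)
print(weight.asnumpy())  # every entry is 2.0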
Example 4: get_outputs
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_outputs(self, merge_multi_context=True):
    """Get outputs of the previous forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. When data parallelism is used, the outputs are
        collected from multiple devices. A `True` value indicates that the
        collected results should be merged so that they look as if they came
        from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
    is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
    elements are `NDArray`.
    """
    outputs = [[exec_.outputs[i] for exec_ in self.execs]
               for i in range(len(self.execs[0].outputs))]
    if merge_multi_context:
        outputs = _merge_multi_context(outputs, self.output_layouts)
    return outputs
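_merge_multi_context is internal to the module API; conceptually, merging concatenates each output's per-device parts along the batch axis. A rough sketch of that idea (an illustration only, not the actual implementation, which also honors layout information):

import mxnet as mx

def merge_along_batch(outputs, batch_axis=0):
    # outputs: [[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]] -> [out1, out2]
    return [mx.nd.concat(*parts, dim=batch_axis) for parts in outputs]

parts = [[mx.nd.ones((2, 3)), mx.nd.ones((2, 3))]]
merged = merge_along_batch(parts)
assert merged[0].shape == (4, 3)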
Example 5: get_states
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_states(self, merge_multi_context=True):
    """Get states from all devices.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. When data parallelism is used, the states are
        collected from multiple devices. A `True` value indicates that the
        collected results should be merged so that they look as if they came
        from a single executor. Note that merging is not yet supported here
        (see the assertion below).

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
    is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
    elements are `NDArray`.
    """
    assert not merge_multi_context, \
        "merge_multi_context=True is not supported for get_states yet."
    return self.state_arrays
Example 6: backward
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def backward(self, out_grads=None):
    """Run backward on all devices. A backward pass should be called after
    a call to the forward function. Backward cannot be called unless
    `self.for_training` is `True`.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray, optional
        Gradient on the outputs to be propagated back.
        This parameter is only needed when bind is called
        on outputs that are not a loss function.
    """
    assert self.for_training, 're-bind with for_training=True to run backward'
    if out_grads is None:
        out_grads = []
    for i, exec_ in enumerate(self.execs):
        # In this simplified variant no output gradients are sliced per
        # device; each executor is driven with an empty gradient list.
        out_grads_slice = []
        exec_.backward(out_grads=out_grads_slice)
Example 7: pad_packed_tensor
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def pad_packed_tensor(input, lengths, value, l_min=None):
    old_shape = input.shape
    if isinstance(lengths, nd.NDArray):
        max_len = as_scalar(lengths.max())  # fixed: take the max *length*, not the max data value
    else:
        max_len = builtins.max(lengths)
    if l_min is not None:
        max_len = builtins.max(max_len, l_min)
    batch_size = len(lengths)
    ctx = input.context
    dtype = input.dtype
    x = nd.full((batch_size * max_len, *old_shape[1:]), value, ctx=ctx, dtype=dtype)
    index = []
    for i, l in enumerate(lengths):
        index.extend(range(i * max_len, i * max_len + l))
    index = nd.array(index, ctx=ctx)
    return scatter_row(x, index, input).reshape(batch_size, max_len, *old_shape[1:])
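A usage sketch, assuming the helper functions as_scalar and scatter_row from the same backend module are in scope (they are not shown in this snippet): packing two sequences of lengths 2 and 3 into a (batch, max_len, feature) tensor.

import mxnet.ndarray as nd

# Five packed rows of width 2: sequence A has 2 rows, sequence B has 3.
packed = nd.arange(10).reshape((5, 2))
padded = pad_packed_tensor(packed, [2, 3], value=0)
print(padded.shape)  # (2, 3, 2): batch of 2, padded to max length 3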
Example 8: split_load_kwargs
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def split_load_kwargs(inputs, kwargs, ctx_list, batch_axis=0):
    r"""Split with support for a kwargs dictionary."""
    def split_map(obj):
        if isinstance(obj, NDArray):
            return split_and_load(obj, ctx_list, batch_axis, even_split=False)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(split_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(split_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(split_map, obj.items()))))
        return [obj for _ in ctx_list]

    inputs = split_map(inputs) if inputs else []
    kwargs = split_map(kwargs) if kwargs else []
    if len(inputs) < len(kwargs):
        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
    elif len(kwargs) < len(inputs):
        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
    inputs = tuple(inputs)
    kwargs = tuple(kwargs)
    return inputs, kwargs
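A minimal usage sketch, assuming split_and_load is mxnet.gluon.utils.split_and_load (the snippet does not show its import): splitting one positional NDArray and one keyword NDArray across two CPU contexts.

import mxnet as mx
from mxnet.ndarray import NDArray
from mxnet.gluon.utils import split_and_load  # assumed source of split_and_load

ctx_list = [mx.cpu(0), mx.cpu(1)]
x = mx.nd.ones((4, 3))
m = mx.nd.zeros((4,))
inputs, kwargs = split_load_kwargs((x,), {'mask': m}, ctx_list)
print(len(inputs), len(kwargs))  # 2 and 2: one (args, kwargs) pair per context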
Example 9: get_input_grads
# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_input_grads(self, merge_multi_context=True):
    """Get the gradients with respect to the inputs of the module.

    Parameters
    ----------
    merge_multi_context : bool
        Default is `True`. When data parallelism is used, the gradients are
        collected from multiple devices. A `True` value indicates that the
        collected results should be merged so that they look as if they came
        from a single executor.

    Returns
    -------
    If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
    is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
    elements are `NDArray`.
    """
    assert self.inputs_need_grad
    if merge_multi_context:
        return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
    return self.input_grad_arrays
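For context, a rough sketch of where methods like backward and get_input_grads sit in the classic (pre-Gluon) mxnet.mod.Module training flow; the symbol and shapes are illustrative:

import mxnet as mx

data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data, num_hidden=4, name='fc')
net = mx.sym.SoftmaxOutput(net, name='softmax')

mod = mx.mod.Module(net, data_names=['data'], label_names=['softmax_label'])
mod.bind(data_shapes=[('data', (8, 10))],
         label_shapes=[('softmax_label', (8,))],
         for_training=True, inputs_need_grad=True)  # required for get_input_grads
mod.init_params()

batch = mx.io.DataBatch(data=[mx.nd.ones((8, 10))], label=[mx.nd.zeros((8,))])
mod.forward(batch)
mod.backward()
grads = mod.get_input_grads()  # one NDArray per input, here shaped (8, 10)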