

Python ndarray.NDArray Code Examples

This article collects typical usage examples of the mxnet.ndarray.NDArray class in Python, gathered from open-source projects. If you are wondering what ndarray.NDArray does or how to use it in practice, the curated examples below may help. You can also explore other usage examples from the mxnet.ndarray module.


The following shows 11 code examples of ndarray.NDArray, sorted by popularity by default.
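For orientation, here is a minimal sketch (standard MXNet API, not drawn from the examples below) of creating an NDArray and checking its type:

import mxnet as mx
from mxnet.ndarray import NDArray

x = mx.nd.ones((2, 3))         # dense NDArray filled with ones
print(isinstance(x, NDArray))  # True: mx.nd arrays are NDArray instances
print(x.shape, x.dtype, x.context)
print(x.asnumpy())             # copy the contents into a numpy array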

Example 1: test_sparse_nd_setitem

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
# Also requires: import mxnet as mx, import numpy as np, and
# rand_shape_2d / rand_ndarray from mxnet.test_utils
def test_sparse_nd_setitem():
    def check_sparse_nd_setitem(stype, shape, dst):
        x = mx.nd.zeros(shape=shape, stype=stype)
        x[:] = dst
        dst_nd = mx.nd.array(dst) if isinstance(dst, (np.ndarray, np.generic)) else dst
        # compare against the numpy view of the source (for NDArray sources)
        # or directly against the scalar/list that was assigned
        expected = dst_nd.asnumpy() if isinstance(dst_nd, NDArray) else dst
        assert np.all(x.asnumpy() == expected)

    shape = rand_shape_2d()
    for stype in ['row_sparse', 'csr']:
        # ndarray assignment
        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
        # numpy assignment
        check_sparse_nd_setitem(stype, shape, np.ones(shape))
    # scalar assigned to row_sparse NDArray
    check_sparse_nd_setitem('row_sparse', shape, 2) 
Author: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 18 | Source: test_sparse_ndarray.py
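As a quick usage sketch of the assignment pattern this test exercises (standard mx.nd API; values chosen for illustration):

import mxnet as mx

x = mx.nd.zeros((3, 4), stype='row_sparse')
x[:] = 2                 # scalar broadcast into a row_sparse NDArray
print(x.asnumpy())       # dense view: a 3x4 array of 2.0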

Example 2: test_create_sparse_nd_from_dense

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
# Also requires: import mxnet as mx, import numpy as np,
# same from mxnet.test_utils, and Context from mxnet.context
def test_create_sparse_nd_from_dense():
    def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
        arr = f(dense_arr, dtype=dtype, ctx=ctx)
        assert(same(arr.asnumpy(), np.ones(shape)))
        assert(arr.dtype == dtype)
        assert(arr.context == ctx)
        # verify the default dtype inferred from dense arr
        arr2 = f(dense_arr)
        assert(arr2.dtype == default_dtype)
        assert(arr2.context == Context.default_ctx)
    shape = rand_shape_2d()
    dtype = np.int32
    src_dtype = np.float64
    ctx = mx.cpu(1)
    dense_arrs = [mx.nd.ones(shape, dtype=src_dtype),
                  np.ones(shape, dtype=src_dtype),
                  np.ones(shape, dtype=src_dtype).tolist()]
    for f in [mx.nd.sparse.csr_matrix, mx.nd.sparse.row_sparse_array]:
        for dense_arr in dense_arrs:
            default_dtype = dense_arr.dtype if isinstance(dense_arr, (NDArray, np.ndarray)) \
                            else np.float32
            check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx) 
Author: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 23 | Source: test_sparse_ndarray.py
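A short usage sketch of the conversion functions this test covers (standard mx.nd.sparse API):

import mxnet as mx
import numpy as np

dense = np.ones((2, 3), dtype=np.float64)
csr = mx.nd.sparse.csr_matrix(dense, dtype=np.int32, ctx=mx.cpu(0))
print(csr.stype, csr.dtype, csr.context)    # csr, int32, cpu(0)
rsp = mx.nd.sparse.row_sparse_array(dense)  # dtype inferred from the source
print(rsp.dtype)                            # float64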

Example 3: get_params

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
# Also assumes the enclosing module imports mxnet.context as ctx
def get_params(self, arg_params, aux_params):
        """Copy data from each executor to `arg_params` and `aux_params`.

        Parameters
        ----------
        arg_params : dict of str to NDArray
            Target parameter arrays, indexed by parameter name.
        aux_params : dict of str to NDArray
            Target aux arrays, indexed by name.

        Notes
        -----
        - This function updates the NDArrays in `arg_params` and
          `aux_params` in place.
        """
        for name, block in zip(self.param_names, self.param_arrays):
            # average the per-device copies on CPU (`ctx` is the
            # mxnet.context module, imported under that alias)
            weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
            weight.astype(arg_params[name].dtype).copyto(arg_params[name])
        for name, block in zip(self.aux_names, self.aux_arrays):
            weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
            weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Author: tonysy | Project: Deep-Feature-Flow-Segmentation | Lines: 22 | Source: DataParallelExecutorGroup.py
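The core pattern in get_params is averaging per-device weight copies on CPU. A standalone sketch of that pattern (the two arrays stand in for copies of one weight on two devices):

import mxnet as mx

block = [mx.nd.ones((2, 2)) * 1.0, mx.nd.ones((2, 2)) * 3.0]
weight = sum(w.copyto(mx.cpu()) for w in block) / len(block)
print(weight.asnumpy())  # every entry is 2.0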

Example 4: get_outputs

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_outputs(self, merge_multi_context=True):
        """Get outputs of the previous forward computation.

        Parameters
        ----------
        merge_multi_context : bool
            Default is `True`. When data parallelism is used, the outputs
            are collected from multiple devices. A `True` value indicates
            that the collected results should be merged so that they appear
            to come from a single executor.

        Returns
        -------
        If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
        is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
        elements are `NDArray`.
        """
        outputs = [[exec_.outputs[i] for exec_ in self.execs]
                   for i in range(len(self.execs[0].outputs))]
        if merge_multi_context:
            outputs = _merge_multi_context(outputs, self.output_layouts)
        return outputs 
Author: tonysy | Project: Deep-Feature-Flow-Segmentation | Lines: 24 | Source: DataParallelExecutorGroup.py
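_merge_multi_context is internal to this executor group; conceptually it concatenates each output's per-device shards along the batch axis. A hedged reimplementation of that idea (the real helper also consults the layout information, which this sketch ignores):

import mxnet.ndarray as nd

def merge_outputs(outputs, batch_axis=0):
    # outputs: [[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]
    return [nd.concat(*shards, dim=batch_axis) for shards in outputs]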

Example 5: get_states

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_states(self, merge_multi_context=True):
        """Get states from all devices.

        Parameters
        ----------
        merge_multi_context : bool
            Default is `True`. When data parallelism is used, the states
            are collected from multiple devices. A `True` value indicates
            that the collected results should be merged so that they appear
            to come from a single executor. Merging is not yet supported
            here, however, so this must be called with
            `merge_multi_context=False` (see the assertion below).

        Returns
        -------
        If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
        is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
        elements are `NDArray`.
        """
        assert not merge_multi_context, \
            "merge_multi_context=True is not supported for get_states yet."
        return self.state_arrays 
Author: tonysy | Project: Deep-Feature-Flow-Segmentation | Lines: 22 | Source: DataParallelExecutorGroup.py

Example 6: backward

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def backward(self, out_grads=None):
        """Run backward on all devices. Backward should be called after
        the forward pass, and only when `self.for_training` is `True`.

        Parameters
        ----------
        out_grads : NDArray or list of NDArray, optional
            Gradient on the outputs to be propagated back.
            This parameter is only needed when bind is called
            on outputs that are not a loss function.
        """
        assert self.for_training, 're-bind with for_training=True to run backward'
        if out_grads is None:
            out_grads = []

        # per-device slicing of `out_grads` is not implemented in this
        # variant; every executor receives an empty head-gradient list
        for exec_ in self.execs:
            exec_.backward(out_grads=[])
Author: tonysy | Project: Deep-Feature-Flow-Segmentation | Lines: 22 | Source: DataParallelExecutorGroup.py
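The out_grads argument plays the role of a head gradient. A minimal sketch of the same idea with Gluon's autograd (standard MXNet API):

import mxnet as mx
from mxnet import autograd

x = mx.nd.ones((2, 2))
x.attach_grad()
with autograd.record():
    y = x * 3                             # not a scalar loss
y.backward(out_grad=mx.nd.ones_like(y))  # explicit head gradient
print(x.grad.asnumpy())                   # every entry is 3.0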

Example 7: pad_packed_tensor

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
# Also requires: import builtins, mxnet.ndarray as nd, and the
# scatter_row / as_scalar helpers defined in the same DGL module
def pad_packed_tensor(input, lengths, value, l_min=None):
    old_shape = input.shape
    if isinstance(lengths, nd.NDArray):
        # the maximum length comes from `lengths`, matching the list
        # branch below (not from the values of `input`)
        max_len = as_scalar(lengths.max())
    else:
        max_len = builtins.max(lengths)

    if l_min is not None:
        max_len = builtins.max(max_len, l_min)

    batch_size = len(lengths)
    ctx = input.context
    dtype = input.dtype
    x = nd.full((batch_size * max_len, *old_shape[1:]), value, ctx=ctx, dtype=dtype)
    index = []
    for i, l in enumerate(lengths):
        index.extend(range(i * max_len, i * max_len + l))
    index = nd.array(index, ctx=ctx)
    return scatter_row(x, index, input).reshape(batch_size, max_len, *old_shape[1:]) 
Author: dmlc | Project: dgl | Lines: 21 | Source: tensor.py
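A hedged usage sketch, assuming pad_packed_tensor and its helpers (scatter_row, as_scalar) from the same DGL module are in scope:

import mxnet.ndarray as nd

# five rows packed back-to-back: two from graph A, three from graph B
packed = nd.arange(10).reshape(5, 2)
padded = pad_packed_tensor(packed, lengths=[2, 3], value=0)
print(padded.shape)  # (2, 3, 2): batch of 2, padded to max length 3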

Example 8: split_load_kwargs

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
# Also requires: split_and_load from mxnet.gluon.utils
def split_load_kwargs(inputs, kwargs, ctx_list, batch_axis=0):
    r"""Split with support for kwargs dictionary"""

    def split_map(obj):
        if isinstance(obj, NDArray):
            return split_and_load(obj, ctx_list, batch_axis, even_split=False)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(split_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(split_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(split_map, obj.items()))))
        return [obj for _ in ctx_list]

    inputs = split_map(inputs) if inputs else []
    kwargs = split_map(kwargs) if kwargs else []
    if len(inputs) < len(kwargs):
        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
    elif len(kwargs) < len(inputs):
        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
    inputs = tuple(inputs)
    kwargs = tuple(kwargs)
    return inputs, kwargs 
Author: dmlc | Project: gluon-cv | Lines: 25 | Source: parallel.py
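A hedged usage sketch, assuming split_load_kwargs and Gluon's split_and_load are importable from the project's parallel module:

import mxnet as mx

x = mx.nd.ones((8, 3))
ctx_list = [mx.cpu(0), mx.cpu(1)]
inputs, kwargs = split_load_kwargs((x,), {'mask': mx.nd.zeros((8,))}, ctx_list)
print(len(inputs))         # 2: one input tuple per context
print(inputs[0][0].shape)  # (4, 3): the batch axis was split in half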

Example 9: split_load_kwargs

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def split_load_kwargs(inputs, kwargs, ctx_list, batch_axis=0):
    r"""Split with support for kwargs dictionary"""
    def split_map(obj):
        if isinstance(obj, NDArray):
            return split_and_load(obj, ctx_list, batch_axis, even_split=False)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(split_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(split_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(split_map, obj.items()))))
        return [obj for _ in ctx_list]
    inputs = split_map(inputs) if inputs else []
    kwargs = split_map(kwargs) if kwargs else []
    if len(inputs) < len(kwargs):
        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
    elif len(kwargs) < len(inputs):
        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
    inputs = tuple(inputs)
    kwargs = tuple(kwargs)
    return inputs, kwargs 
Author: Angzz | Project: panoptic-fpn-gluon | Lines: 23 | Source: parallel.py

Example 10: backward

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def backward(self, out_grads=None):
        """Run backward on all devices. A backward should be called after
        a call to the forward function. Backward cannot be called unless
        `self.for_training` is `True`.

        Parameters
        ----------
        out_grads : NDArray or list of NDArray, optional
            Gradient on the outputs to be propagated back.
            This parameter is only needed when bind is called
            on outputs that are not a loss function.
        """
        assert self.for_training, 're-bind with for_training=True to run backward'
        if out_grads is None:
            out_grads = []

        # per-device slicing of `out_grads` is not implemented in this
        # variant; every executor receives an empty head-gradient list
        for exec_ in self.execs:
            exec_.backward(out_grads=[])
Author: i-pan | Project: kaggle-rsna18 | Lines: 21 | Source: DataParallelExecutorGroup.py

Example 11: get_input_grads

# Required import: from mxnet import ndarray [as alias]
# Or: from mxnet.ndarray import NDArray [as alias]
def get_input_grads(self, merge_multi_context=True):
        """Get the gradients with respect to the inputs of the module.

        Parameters
        ----------
        merge_multi_context : bool
            Default is `True`. When data parallelism is used, the gradients
            are collected from multiple devices. A `True` value indicates
            that the collected results should be merged so that they appear
            to come from a single executor.

        Returns
        -------
        If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
        is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
        elements are `NDArray`.
        """
        assert self.inputs_need_grad
        if merge_multi_context:
            return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
        return self.input_grad_arrays 
Author: i-pan | Project: kaggle-rsna18 | Lines: 23 | Source: DataParallelExecutorGroup.py


Note: The mxnet.ndarray.NDArray examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use should follow each project's license. Do not republish without permission.