This article collects typical usage examples of the Python method mxnet.nd.NDArray. If you are wondering how nd.NDArray is used in practice, or are looking for concrete nd.NDArray examples, the curated method examples below may help. You can also explore further usage examples from the module this method belongs to, mxnet.nd.
The following shows 15 code examples of the nd.NDArray method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
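Before the examples, here is a minimal standalone sketch (not taken from any of the projects below, assuming only that mxnet and numpy are installed) of creating an nd.NDArray and applying the isinstance check that most of the batchify functions below rely on:

import numpy as np
from mxnet import nd

x = nd.array(np.arange(6).reshape(2, 3))  # build an NDArray from a numpy array
print(isinstance(x, nd.NDArray))          # True
print(x.shape, x.dtype)                   # (2, 3) <class 'numpy.float32'>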
Example 1: default_mp_pad_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: numpy as np; from mxnet import context
def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into batch, labels are padded to same shape"""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0))
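A hypothetical usage sketch (assumes the imports above and a platform where MXNet's 'cpu_shared' context is available, e.g. Linux); same-shape NDArray samples take the shared-memory stacking branch, while ragged numpy label arrays would go through the -1 padding branch:

samples = [nd.zeros((3, 5)), nd.ones((3, 5))]
batch = default_mp_pad_batchify_fn(samples)
print(batch.shape)  # (2, 3, 5), allocated in the cpu_shared context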
Example 2: _pad_arrays
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: functools, numpy as np, mxnet as mx, typing (List, Union)
def _pad_arrays(
    data: List[Union[np.ndarray, mx.nd.NDArray]], axis: int = 0,
) -> List[Union[np.ndarray, mx.nd.NDArray]]:
    assert isinstance(data[0], (np.ndarray, mx.nd.NDArray))
    is_mx = isinstance(data[0], mx.nd.NDArray)
    # MXNet causes a segfault when persisting 0-length arrays. As such,
    # we add a dummy pad of length 1 to 0-length dims.
    max_len = max(1, functools.reduce(max, (x.shape[axis] for x in data)))
    padded_data = []
    for x in data:
        # MXNet lacks the functionality to pad n-D arrays consistently.
        # We fall back to numpy if x is an mx.nd.NDArray.
        if is_mx:
            x = x.asnumpy()
        pad_size = max_len - x.shape[axis]
        pad_lengths = [(0, 0)] * x.ndim
        pad_lengths[axis] = (0, pad_size)
        x_padded = np.pad(x, mode="constant", pad_width=pad_lengths)
        padded_data.append(x_padded if not is_mx else mx.nd.array(x_padded))
    return padded_data
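A hypothetical usage sketch (assumes numpy is imported as np and the function above is in scope): two arrays with different lengths along axis 0 are padded to the longer one.

a = np.arange(6, dtype=np.float32).reshape(3, 2)
b = np.arange(2, dtype=np.float32).reshape(1, 2)
padded = _pad_arrays([a, b], axis=0)
print([p.shape for p in padded])  # [(3, 2), (3, 2)]; b is zero-padded at the end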
Example 3: _as_in_context
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: numpy as np, mxnet as mx, and the surrounding module's
# DataBatch, MPWorkerInfo and stack helpers
def _as_in_context(batch: dict, ctx: mx.Context) -> DataBatch:
    """Move data into new context, should only be in main process."""
    assert (
        not MPWorkerInfo.worker_process
    ), "This function is not meant to be used in workers."
    batch = {
        k: v.as_in_context(ctx) if isinstance(v, nd.NDArray)
        # Workaround due to MXNet not being able to handle NDArrays with 0 in shape properly:
        else (
            stack(v, False, v.dtype, ctx)
            if isinstance(v[0], np.ndarray) and 0 in v[0].shape
            else v
        )
        for k, v in batch.items()
    }
    return batch
Example 4: dgl_mp_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: dgl and a Pad batchify class from the surrounding project
def dgl_mp_batchify_fn(data):
    if isinstance(data[0], tuple):
        data = zip(*data)
        return [dgl_mp_batchify_fn(i) for i in data]
    for dt in data:
        if dt is not None:
            if isinstance(dt, dgl.DGLGraph):
                return [d for d in data if isinstance(d, dgl.DGLGraph)]
            elif isinstance(dt, nd.NDArray):
                pad = Pad(axis=(1, 2), num_shards=1, ret_length=False)
                data_list = [dt for dt in data if dt is not None]
                return pad(data_list)
Example 5: deal_output
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def deal_output(y: nd.NDArray, s, b, c):
    """Split a flat detection output (YOLO-style layout) into its three parts.
    :param y: network output of shape (batch, s*s*c + s*s*b + s*s*b*4)
    :param s: grid size (the image is divided into s x s cells)
    :param b: number of boxes predicted per cell
    :param c: number of classes
    :return: (label, preds, location) with shapes
             (batch, s*s, c), (batch, s*s*b) and (batch, s*s, b, 4)
    """
    label = y[:, 0:s * s * c]
    preds = y[:, s * s * c: s * s * c + s * s * b]
    location = y[:, s * s * c + s * s * b:]
    label = nd.reshape(label, shape=(-1, s * s, c))
    location = nd.reshape(location, shape=(-1, s * s, b, 4))
    return label, preds, location
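A hypothetical usage sketch (the values of s, b and c below are the usual YOLO-v1 settings and are chosen only for illustration; any flat output with this layout works):

from mxnet import nd
s, b, c = 7, 2, 20
y = nd.random.uniform(shape=(4, s * s * (c + b + b * 4)))  # (4, 1470)
label, preds, location = deal_output(y, s, b, c)
print(label.shape, preds.shape, location.shape)  # (4, 49, 20) (4, 98) (4, 49, 2, 4)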
Example 6: default_pad_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: numpy as np
def default_pad_batchify_fn(data):
    """Collate data into batch, labels are padded to same shape"""
    if isinstance(data[0], nd.NDArray):
        return nd.stack(*data)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((len(data), pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype)
Example 7: tsn_mp_batchify_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: numpy as np; from mxnet import context
def tsn_mp_batchify_fn(data):
    """Collate data into batch. Use shared memory for stacking.
    Modified default batchify function for temporal segment networks:
    `nd.stack` is replaced by `nd.concat` since the batch dimension already exists.
    """
    if isinstance(data[0], nd.NDArray):
        return nd.concat(*data, dim=0)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [tsn_mp_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        return nd.array(data, dtype=data.dtype,
                        ctx=context.Context('cpu_shared', 0))
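A hypothetical usage sketch (assumes the imports above): each sample already carries a leading segment dimension, so samples are concatenated rather than stacked.

clips = [nd.zeros((3, 2, 8, 8)), nd.zeros((3, 2, 8, 8))]  # two samples, 3 segments each
batch = tsn_mp_batchify_fn(clips)
print(batch.shape)  # (6, 2, 8, 8)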
Example 8: forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def forward(self, x):
    """
    :param nd.NDArray x: input data in NTC layout (N: batch-size, T: sequence len, C: channels)
    :return: output of LSTNet in NC layout
    :rtype nd.NDArray
    """
    # Convolution
    c = self.conv(x.transpose((0, 2, 1)))  # Transpose NTC to NCT (a.k.a. NCW) before convolution
    c = self.dropout(c)
    # GRU
    r = self.gru(c.transpose((2, 0, 1)))  # Transpose NCT to TNC before GRU
    r = r[-1]  # Only keep the last output
    r = self.dropout(r)  # Now in NC layout
    # Skip GRU
    # Slice off multiples of skip from convolution output
    skip_c = c[:, :, -(c.shape[2] // self.skip) * self.skip:]
    skip_c = skip_c.reshape(c.shape[0], c.shape[1], -1, self.skip)  # Reshape to N x C x T/skip x skip
    skip_c = skip_c.transpose((2, 0, 3, 1))  # Transpose to T/skip x N x skip x C
    skip_c = skip_c.reshape(skip_c.shape[0], -1, skip_c.shape[3])  # Reshape to T/skip x (N*skip) x C
    s = self.skip_gru(skip_c)
    s = s[-1]  # Only keep the last output (now in (N*skip) x C layout)
    s = s.reshape(x.shape[0], -1)  # Now in N x (skip*C) layout
    # FC layer
    fc = self.fc(nd.concat(r, s))  # NC layout
    # Autoregressive highway
    ar_x = x[:, -self.ar_window:, :]  # NTC layout
    ar_x = ar_x.transpose((0, 2, 1))  # NCT layout
    ar_x = ar_x.reshape(-1, ar_x.shape[2])  # (N*C) x T layout
    ar = self.ar_fc(ar_x)
    ar = ar.reshape(x.shape[0], -1)  # NC layout
    # Add autoregressive and fc outputs
    res = fc + ar
    return res
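A standalone sketch of the skip-connection reshaping above, applied to a dummy convolution output in NCT layout (N=2, C=4, T=10) with skip=3; the values are chosen only to make the shape bookkeeping visible:

from mxnet import nd
N, C, T, skip = 2, 4, 10, 3
c = nd.zeros((N, C, T))
skip_c = c[:, :, -(T // skip) * skip:]                         # keep the last 9 steps: (2, 4, 9)
skip_c = skip_c.reshape(N, C, -1, skip)                        # (2, 4, 3, 3)
skip_c = skip_c.transpose((2, 0, 3, 1))                        # (3, 2, 3, 4)
skip_c = skip_c.reshape(skip_c.shape[0], -1, skip_c.shape[3])  # (3, 6, 4) = T/skip x (N*skip) x C
print(skip_c.shape)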
Example 9: mixup_transform
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def mixup_transform(label, classes, lam=1, eta=0.0):
    if isinstance(label, nd.NDArray):
        label = [label]
    res = []
    for l in label:
        y1 = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        y2 = l[::-1].one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        res.append(lam * y1 + (1 - lam) * y2)
    return res
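A hypothetical usage sketch (assumes mxnet is imported; the reversed slice l[::-1] inside the function needs an MXNet version that supports negative-step indexing):

label = nd.array([0, 1, 2, 3])  # a batch of 4 integer labels over 10 classes
mixed = mixup_transform(label, classes=10, lam=0.7, eta=0.0)
print(mixed[0].shape)  # (4, 10)
# row 0 is 0.7 * one_hot(0) + 0.3 * one_hot(3), i.e. 0.7 at class 0 and 0.3 at class 3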
Example 10: smooth
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def smooth(label, classes, eta=0.1):
    if isinstance(label, nd.NDArray):
        label = [label]
    smoothed = []
    for l in label:
        res = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        smoothed.append(res)
    return smoothed
Example 11: rebuild_ndarray
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def rebuild_ndarray(*args):
    """Rebuild ndarray from pickled shared memory"""
    # pylint: disable=no-value-for-parameter
    return nd.NDArray(nd.ndarray._new_from_shared_mem(*args))
Example 12: _is_stackable
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: numpy as np, mxnet as mx, typing (List, Union, Any)
def _is_stackable(
    arrays: List[Union[np.ndarray, mx.nd.NDArray, Any]], axis: int = 0,
) -> bool:
    """
    Check if elements are scalars, have too few dimensions, or their
    target axes have equal length; i.e. they are directly `stack`able.
    """
    if isinstance(arrays[0], (mx.nd.NDArray, np.ndarray)):
        s = set(arr.shape[axis] for arr in arrays)
        return len(s) <= 1 and arrays[0].shape[axis] != 0
    return True
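A hypothetical usage sketch (assumes numpy is imported as np and the function above is in scope):

same = [np.zeros((3, 2)), np.zeros((3, 5))]
diff = [np.zeros((3, 2)), np.zeros((4, 2))]
print(_is_stackable(same, axis=0))  # True: axis-0 lengths are all 3
print(_is_stackable(diff, axis=0))  # False: axis-0 lengths differ, {3, 4}
print(_is_stackable([1.0, 2.0]))    # True: scalars fall through to the default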
Example 13: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
# Also used in this example: the surrounding module's nd_sym_type, EmbedderScheme and VectorEmbedder definitions
def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type:
    """
    Used for forward pass through embedder network.
    :param F: backend api, either `nd` or `sym` (if block has been hybridized).
    :type F: nd or sym
    :param x: vector representing environment state, of shape (batch_size, in_channels).
    :return: embedding of environment state, of shape (batch_size, channels).
    """
    if isinstance(x, nd.NDArray) and len(x.shape) != 2 and self.scheme != EmbedderScheme.Empty:
        raise ValueError("Vector embedders expect the input size to have 2 dimensions. The given size is: {}"
                         .format(x.shape))
    return super(VectorEmbedder, self).hybrid_forward(F, x, *args, **kwargs)
Example 14: mixup_transform
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def mixup_transform(label, classes, lam=1, eta=0.0):
    if isinstance(label, nd.NDArray):
        label = [label]
    res = []
    for l in label:
        y1 = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        y2 = l[::-1].one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        res.append(lam * y1 + (1 - lam) * y2)
    return res
Example 15: smooth
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import NDArray [as alias]
def smooth(label, classes, eta=0.1):
    if isinstance(label, nd.NDArray):
        label = [label]
    smoothed = [
        l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        for l in label
    ]
    return smoothed