This article collects typical usage examples of the Python method mxnet.context.Context. If you are unsure what context.Context does or how to use it, the curated code examples below may help; you can also explore the enclosing module, mxnet.context, for further details.
The following presents 8 code examples of context.Context, sorted by popularity by default.
Example 1: default_mp_pad_batchify_fn
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: import numpy as np; from mxnet import nd
def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        # Convert elements one by one: the label sequences may be ragged,
        # which np.asarray on the whole list does not handle.
        data = [np.asarray(l) for l in data]
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1,
                      dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype,
                        ctx=context.Context('cpu_shared', 0))
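As a usage sketch, this collate function is typically handed to a Gluon DataLoader via its batchify_fn argument; the toy dataset, shapes, and label contents below are assumptions for illustration.

import numpy as np
from mxnet import gluon, nd

# Toy data: fixed-shape images plus variable-length 2-D labels (assumed shapes).
images = [nd.zeros((3, 32, 32)) for _ in range(8)]
labels = [np.full((n, 5), n, dtype=np.float32) for n in (1, 3, 2, 4, 1, 2, 3, 1)]
dataset = gluon.data.ArrayDataset(images, labels)

loader = gluon.data.DataLoader(dataset, batch_size=4,
                               batchify_fn=default_mp_pad_batchify_fn)
for batch_images, batch_labels in loader:
    # Labels are padded with -1 up to the longest sequence in the batch.
    print(batch_images.shape, batch_labels.shape)  # e.g. (4, 3, 32, 32) (4, 4, 5)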
Example 2: batchify
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: from typing import List, Optional; import mxnet as mx,
# plus the stack helper and the DType/DataBatch aliases from the surrounding
# GluonTS module.
def batchify(
    data: List[dict],
    dtype: DType,
    multi_processing: bool,
    single_process_ctx: Optional[mx.Context] = None,
    variable_length: bool = False,
) -> DataBatch:
    """Reduce the list of dictionaries to a single dictionary, where values
    referenced by the same key are reduced using the stack function."""
    return {
        key: stack(
            data=[item[key] for item in data],
            multi_processing=multi_processing,
            dtype=dtype,
            single_process_ctx=single_process_ctx,
            variable_length=variable_length,
        )
        for key in data[0].keys()
    }
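A minimal call sketch, assuming the stack helper from the surrounding GluonTS module is in scope; the field names and shapes are made up for illustration.

import mxnet as mx
import numpy as np

# Two records with identical keys; values under each key get stacked.
data = [
    {"target": np.ones(24, dtype=np.float32), "feat_static": np.zeros(3, dtype=np.float32)},
    {"target": np.zeros(24, dtype=np.float32), "feat_static": np.ones(3, dtype=np.float32)},
]
batch = batchify(data, dtype=np.float32, multi_processing=False,
                 single_process_ctx=mx.cpu())
# batch["target"]: shape (2, 24); batch["feat_static"]: shape (2, 3)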
Example 3: _as_in_context
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: import numpy as np; import mxnet as mx; from mxnet import nd,
# plus MPWorkerInfo and stack from the surrounding module.
def _as_in_context(batch: dict, ctx: mx.Context) -> DataBatch:
    """Move data into a new context; should only be called in the main process."""
    assert (
        not MPWorkerInfo.worker_process
    ), "This function is not meant to be used in workers."
    batch = {
        k: v.as_in_context(ctx) if isinstance(v, nd.NDArray)
        # Workaround due to MXNet not being able to handle NDArrays with 0 in shape properly:
        else (
            stack(v, False, v.dtype, ctx)
            if isinstance(v[0], np.ndarray) and 0 in v[0].shape
            else v
        )
        for k, v in batch.items()
    }
    return batch
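In a training loop this is used right after a batch arrives from the loader; a sketch with made-up batch contents follows.

import mxnet as mx
from mxnet import nd

# Pick a GPU when one is available, otherwise stay on CPU.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
batch = {"target": nd.ones((32, 24)), "feat_static": nd.zeros((32, 3))}
device_batch = _as_in_context(batch, ctx)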
Example 4: _batchify
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: import numpy as np; from mxnet import nd
def _batchify(data):
    """
    Collate data into a batch, using shared memory for stacking.

    :param data: a list of (key, input, label) triples, inputs with layout 'NTC'.
    :return: (x, x's unpadded lengths) if labels are not supplied, otherwise
        (x, x's unpadded lengths, y, y's unpadded lengths).
    """
    # input layout is NTC
    keys, inputs, labels = [item[0] for item in data], [item[1] for item in data], \
                           [item[2] for item in data]
    if len(data) > 1:
        max_data_len = max([seq.shape[0] for seq in inputs])
        max_labels_len = 0 if not labels else max([seq.shape[0] for seq in labels])
    else:
        max_data_len = inputs[0].shape[0]
        max_labels_len = 0 if not labels else labels[0].shape[0]
    x_lens = [item.shape[0] for item in inputs]
    y_lens = [item.shape[0] for item in labels]
    for i, seq in enumerate(inputs):
        pad_len = max_data_len - seq.shape[0]
        inputs[i] = np.pad(seq, ((0, pad_len), (0, 0)), 'constant', constant_values=0)
        labels[i] = np.pad(labels[i], (0, max_labels_len - labels[i].shape[0]),
                           'constant', constant_values=-1)
    inputs = np.asarray(inputs, dtype=np.float32)
    if labels is not None:
        labels = np.asarray(labels, dtype=np.float32)
    inputs = inputs.transpose((1, 0, 2))
    labels = labels.transpose((1, 0))
    # Note: labels is built as a list above, so the `labels is None` fallback
    # below only triggers if a caller explicitly passes labels as None.
    return (nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),
            nd.array(x_lens, ctx=context.Context('cpu_shared', 0))) \
        if labels is None else (
            nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),
            nd.array(x_lens, ctx=context.Context('cpu_shared', 0)),
            nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)),
            nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))
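A sketch of how the collation behaves on two variable-length utterances; the keys, feature dimension, and label values are assumptions.

import numpy as np

data = [
    ("utt0", np.random.rand(5, 13).astype(np.float32), np.array([1., 2.], dtype=np.float32)),
    ("utt1", np.random.rand(8, 13).astype(np.float32), np.array([3.], dtype=np.float32)),
]
x, x_lens, y, y_lens = _batchify(data)
print(x.shape)  # (8, 2, 13): padded to the max length, then transposed to TNC
print(y.shape)  # (2, 2): labels padded with -1, then transposed
print(x_lens.asnumpy(), y_lens.asnumpy())  # [5. 8.] [2. 1.]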
Example 5: load
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: from mxnet.model import load_checkpoint; from mxnet.module import Module
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
    """Create a model from a previously saved checkpoint.

    Parameters
    ----------
    prefix : str
        Path prefix of the saved model files. You should have
        "prefix-symbol.json", "prefix-xxxx.params", and
        optionally "prefix-xxxx.states", where xxxx is the
        epoch number.
    epoch : int
        Epoch to load.
    load_optimizer_states : bool
        Whether to load optimizer states. The checkpoint needs
        to have been made with save_optimizer_states=True.
    data_names : list of str
        Default is `('data')` for a typical model used in image classification.
    label_names : list of str
        Default is `('softmax_label')` for a typical model used in image
        classification.
    logger : Logger
        Default is `logging`.
    context : Context or list of Context
        Default is `cpu()`.
    work_load_list : list of number
        Default `None`, indicating uniform workload.
    fixed_param_names : list of str
        Default `None`, indicating no network parameters are fixed.
    """
    sym, args, auxs = load_checkpoint(prefix, epoch)
    mod = Module(symbol=sym, **kwargs)
    mod._arg_params = args
    mod._aux_params = auxs
    mod.params_initialized = True
    if load_optimizer_states:
        mod._preload_opt_states = '%s-%04d.states' % (prefix, epoch)
    return mod
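A usage sketch with a hypothetical checkpoint prefix; Module keyword arguments such as context are simply forwarded through **kwargs.

import mxnet as mx

# Expects 'mymodel-symbol.json' and 'mymodel-0010.params' on disk
# (plus 'mymodel-0010.states' when loading optimizer states).
mod = load('mymodel', 10, load_optimizer_states=True,
           data_names=('data',), label_names=('softmax_label',),
           context=mx.cpu())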
Example 6: tsn_mp_batchify_fn
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: import numpy as np; from mxnet import nd
def tsn_mp_batchify_fn(data):
    """Collate data into a batch, using shared memory for stacking.

    Modified default batchify function for temporal segment networks:
    uses `nd.concat` instead of `nd.stack` since the batch dimension already exists.
    """
    if isinstance(data[0], nd.NDArray):
        return nd.concat(*data, dim=0)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [tsn_mp_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        return nd.array(data, dtype=data.dtype,
                        ctx=context.Context('cpu_shared', 0))
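For temporal segment networks each sample already carries a leading segment dimension, so concatenation (not stacking) yields the batch; the shapes below are assumptions.

from mxnet import nd

samples = [nd.zeros((3, 3, 224, 224)) for _ in range(4)]  # 3 segments per sample
batch = tsn_mp_batchify_fn(samples)
print(batch.shape)  # (12, 3, 224, 224): segments concatenated along dim 0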
Example 7: load
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: from mxnet.model import load_checkpoint, plus the project's DetModule
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
    """Creates a model from a previously saved checkpoint.

    Parameters
    ----------
    prefix : str
        Path prefix of the saved model files. You should have
        "prefix-symbol.json", "prefix-xxxx.params", and
        optionally "prefix-xxxx.states", where xxxx is the
        epoch number.
    epoch : int
        Epoch to load.
    load_optimizer_states : bool
        Whether to load optimizer states. The checkpoint needs
        to have been made with save_optimizer_states=True.
    data_names : list of str
        Default is ``('data')`` for a typical model used in image classification.
    label_names : list of str
        Default is ``('softmax_label')`` for a typical model used in image
        classification.
    logger : Logger
        Default is ``logging``.
    context : Context or list of Context
        Default is ``cpu()``.
    work_load_list : list of number
        Default ``None``, indicating uniform workload.
    fixed_param_names : list of str
        Default ``None``, indicating no network parameters are fixed.
    """
    sym, args, auxs = load_checkpoint(prefix, epoch)
    mod = DetModule(symbol=sym, **kwargs)
    mod._arg_params = args
    mod._aux_params = auxs
    mod.params_initialized = True
    if load_optimizer_states:
        mod._preload_opt_states = '%s-%04d.states' % (prefix, epoch)
    return mod
Example 8: reduce_ndarray
# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import Context [as alias]
# Also needed here: import multiprocessing.reduction; the companion
# rebuild_ndarray must be defined alongside.
def reduce_ndarray(data):
    """Reduce ndarray to shared memory handle"""
    # keep a local ref before duplicating fd
    data = data.as_in_context(context.Context("cpu_shared", 0))
    pid, fd, shape, dtype = data._to_shared_mem()
    fd = multiprocessing.reduction.DupFd(fd)
    return rebuild_ndarray, (pid, fd, shape, dtype)
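In mxnet.gluon.data.dataloader this reducer is paired with a rebuild_ndarray counterpart and registered with ForkingPickler, so that pickling an NDArray for a worker process sends a shared-memory handle instead of copying the data; a sketch of the registration follows.

from multiprocessing.reduction import ForkingPickler
from mxnet import nd

# After this, any NDArray pickled for a child process travels as a
# shared-memory handle (assumes rebuild_ndarray is defined alongside).
ForkingPickler.register(nd.NDArray, reduce_ndarray)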