This article collects typical usage examples of the Python method mxnet.context.cpu. If you are unsure what context.cpu does or how to call it, the curated examples below may help. You can also explore further usage of the containing module, mxnet.context.
The following presents 15 code examples of the context.cpu method, sorted by popularity by default.
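Before the examples, a minimal sketch (not taken from any of the projects below) showing the two import styles that the per-example comments refer to; both spellings yield the same CPU context object:

import mxnet as mx
from mxnet import context as ctx   # alias style, used as ctx.cpu()
from mxnet.context import cpu      # direct style, used as cpu()

device = cpu()               # defaults to device_id 0
print(device)                # cpu(0)
print(device == ctx.cpu(0))  # True: same device type and id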
Example 1: __init__

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def __init__(self, symbol, data_names, label_names,
             logger=logging, context=ctx.cpu(), work_load_list=None,
             max_data_shapes=None, max_label_shapes=None, fixed_param_prefix=None):
    super(MutableModule, self).__init__(logger=logger)
    self._symbol = symbol
    self._data_names = data_names
    self._label_names = label_names
    self._context = context
    self._work_load_list = work_load_list
    self._curr_module = None
    self._max_data_shapes = max_data_shapes
    self._max_label_shapes = max_label_shapes
    self._fixed_param_prefix = fixed_param_prefix
    # Collect the names of all arguments whose name contains one of the prefixes.
    fixed_param_names = list()
    if fixed_param_prefix is not None:
        for name in self._symbol.list_arguments():
            for prefix in self._fixed_param_prefix:
                if prefix in name:
                    fixed_param_names.append(name)
    self._fixed_param_names = fixed_param_names
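A hypothetical construction of the class above; MutableModule comes from the surrounding detection codebase, and the symbol here is only a stand-in:

import logging
import mxnet as mx
from mxnet import context as ctx

data = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc1')
# Arguments whose names contain 'fc1' (fc1_weight, fc1_bias) are collected as fixed.
mod = MutableModule(sym, data_names=['data'], label_names=None,
                    logger=logging, context=ctx.cpu(),
                    fixed_param_prefix=['fc1'])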
Example 2: get_params

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def get_params(self, arg_params, aux_params):
    """Copy data from each executor to `arg_params` and `aux_params`.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Target parameter arrays.
    aux_params : dict of str to NDArray
        Target aux arrays.

    Notes
    -----
    - This function will in-place update the NDArrays in `arg_params` and `aux_params`.
    """
    for name, block in zip(self.param_names, self.param_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(arg_params[name].dtype).copyto(arg_params[name])
    for name, block in zip(self.aux_names, self.aux_arrays):
        weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
        weight.astype(aux_params[name].dtype).copyto(aux_params[name])
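A standalone sketch of the averaging pattern used above: device-local copies of one parameter are pulled onto the CPU and averaged there.

import mxnet as mx
from mxnet import context as ctx

# Two stand-in copies of the same weight, as if gathered from two devices.
block = [mx.nd.full((2, 2), 1.0), mx.nd.full((2, 2), 3.0)]
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
print(weight)  # every entry is 2.0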
Example 3: __init__

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def __init__(self, symbol, data_names, label_names,
             logger=logging, context=ctx.cpu(), work_load_list=None,
             max_data_shapes=None, max_label_shapes=None, fixed_param_prefix=None):
    super(MutableModule, self).__init__(logger=logger)
    self._symbol = symbol
    self._data_names = data_names
    self._label_names = label_names
    self._context = context
    self._work_load_list = work_load_list
    self._curr_module = None
    self._max_data_shapes = max_data_shapes
    self._max_label_shapes = max_label_shapes
    self._fixed_param_prefix = fixed_param_prefix
    fixed_param_names = list()
    if fixed_param_prefix is not None:
        for name in self._symbol.list_arguments():
            for prefix in self._fixed_param_prefix:
                if prefix in name:
                    fixed_param_names.append(name)
    self._fixed_param_names = fixed_param_names
    self._preload_opt_states = None
Example 4: get_duc_mobilenet

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def get_duc_mobilenet(base_network, pretrained=False, ctx=cpu(), **kwargs):
    """Get a MobileNet with DUC upsampling layers.

    Parameters
    ----------
    base_network : str
        Name of the base feature-extraction network.
    pretrained : bool
        Whether to load the pretrained base network.
    ctx : mxnet.Context
        mx.cpu() or mx.gpu().

    Returns
    -------
    nn.HybridBlock
        Network instance of MobileNet with DUC upsampling layers.
    """
    net = DUCMobilenet(base_network=base_network, pretrained_base=pretrained, **kwargs)
    # Suppress initialization warnings for parameters that are already set.
    with warnings.catch_warnings(record=True) as _:
        warnings.simplefilter("always")
        net.initialize()
    net.collect_params().reset_ctx(ctx)
    return net
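A hypothetical call to the factory above; the set of valid base_network strings comes from the surrounding project and is not listed on this page:

from mxnet.context import cpu

# 'mobilenetv1.0' is a placeholder name, not confirmed by this page.
net = get_duc_mobilenet('mobilenetv1.0', pretrained=False, ctx=cpu())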
Example 5: __init__

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def __init__(self, nclass, backbone='resnet50', aux=True, ctx=cpu(), pretrained_base=True,
             height=None, width=None, base_size=520, crop_size=480, **kwargs):
    super(DeepLabV3, self).__init__(nclass, aux, backbone, ctx=ctx, base_size=base_size,
                                    crop_size=crop_size, pretrained_base=pretrained_base, **kwargs)
    height = height if height is not None else crop_size
    width = width if width is not None else crop_size
    with self.name_scope():
        self.head = _DeepLabHead(nclass, height=height // 8,
                                 width=width // 8, **kwargs)
        self.head.initialize(ctx=ctx)
        self.head.collect_params().setattr('lr_mult', 10)
        if self.aux:
            self.auxlayer = _FCNHead(1024, nclass, **kwargs)
            self.auxlayer.initialize(ctx=ctx)
            self.auxlayer.collect_params().setattr('lr_mult', 10)
    self._up_kwargs = {'height': height, 'width': width}
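A hypothetical construction of the network above, assuming DeepLabV3 and its heads (_DeepLabHead, _FCNHead) are defined as in GluonCV; pretrained_base=False avoids downloading backbone weights:

from mxnet.context import cpu

net = DeepLabV3(nclass=21, backbone='resnet50', pretrained_base=False, ctx=cpu())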
Example 6: __init__

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def __init__(self, width_mult=1, ctx=cpu(), **kwargs):
    # `AlexNetLegacy.configs` is a class attribute defined elsewhere in the source;
    # channel counts (except the value 3) are scaled by the width multiplier.
    configs = list(map(lambda x: 3 if x == 3 else int(x * width_mult),
                       AlexNetLegacy.configs))
    super(AlexNetLegacy, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        with self.features.name_scope():
            self.features.add(nn.Conv2D(configs[1], kernel_size=11, strides=2),
                              nn.BatchNorm(),
                              nn.MaxPool2D(pool_size=3, strides=2),
                              nn.Activation('relu'))
            self.features.add(nn.Conv2D(configs[2], kernel_size=5),
                              nn.BatchNorm(),
                              nn.MaxPool2D(pool_size=3, strides=2),
                              nn.Activation('relu'))
            self.features.add(nn.Conv2D(configs[3], kernel_size=3),
                              nn.BatchNorm(),
                              nn.Activation('relu'))
            self.features.add(nn.Conv2D(configs[4], kernel_size=3),
                              nn.BatchNorm(),
                              nn.Activation('relu'))
            self.features.add(nn.Conv2D(configs[5], kernel_size=3),
                              nn.BatchNorm())
    self.features.initialize(ctx=ctx)
Example 7: alexnet

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def alexnet(pretrained=False, ctx=cpu(),
            root='~/.mxnet/models', **kwargs):
    r"""AlexNet model from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Parameters
    ----------
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    """
    net = AlexNet(**kwargs)
    if pretrained:
        from .model_store import get_model_file
        net.load_parameters(get_model_file('alexnet', tag=pretrained, root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        net.synset = attrib.synset
        net.classes = attrib.classes
        net.classes_long = attrib.classes_long
    return net
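A minimal inference sketch using the factory above, assuming the pretrained AlexNet weights are available from the model store:

import mxnet as mx
from mxnet.context import cpu

net = alexnet(pretrained=True, ctx=cpu())
x = mx.nd.random.uniform(shape=(1, 3, 224, 224), ctx=cpu())
prob = net(x).softmax()
top1 = int(prob.argmax(axis=1).asscalar())
print(net.classes[top1])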
Example 8: kv_push

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def kv_push(self, key, value):
    # if value.context != mx.cpu():
    #     value = value.as_in_context(mx.cpu())
    if key not in self._kvinit:
        self._distkv.init(key, nd.zeros_like(value))
        self._kvinit[key] = 1
    self._distkv.push(key, value)
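A standalone sketch of the init-then-push pattern above, using a local kvstore in place of the distributed one:

import mxnet as mx
from mxnet import nd

kv = mx.kv.create('local')
kvinit = {}

def kv_push(key, value):
    # Initialize the key once with zeros, then push the actual value.
    if key not in kvinit:
        kv.init(key, nd.zeros_like(value))
        kvinit[key] = 1
    kv.push(key, value)

kv_push('weight', nd.ones((2, 2)))
out = nd.zeros((2, 2))
kv.pull('weight', out=out)
print(out)  # prints ones: the pushed value overwrites the zero initialization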
Example 9: load

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
    """Create a model from a previously saved checkpoint.

    Parameters
    ----------
    prefix : str
        Path prefix of the saved model files. You should have
        "prefix-symbol.json", "prefix-xxxx.params", and
        optionally "prefix-xxxx.states", where xxxx is the
        epoch number.
    epoch : int
        Epoch to load.
    load_optimizer_states : bool
        Whether to load optimizer states. The checkpoint needs
        to have been made with save_optimizer_states=True.
    data_names : list of str
        Default is `('data',)` for a typical model used in image classification.
    label_names : list of str
        Default is `('softmax_label',)` for a typical model used in image
        classification.
    logger : Logger
        Default is `logging`.
    context : Context or list of Context
        Default is `cpu()`.
    work_load_list : list of number
        Default is `None`, indicating a uniform workload.
    fixed_param_names : list of str
        Default is `None`, indicating no network parameters are fixed.
    """
    sym, args, auxs = load_checkpoint(prefix, epoch)
    mod = Module(symbol=sym, **kwargs)
    mod._arg_params = args
    mod._aux_params = auxs
    mod.params_initialized = True
    if load_optimizer_states:
        mod._preload_opt_states = '%s-%04d.states' % (prefix, epoch)
    return mod
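A hypothetical use of the loader above; it assumes checkpoint files "mymodel-symbol.json" and "mymodel-0010.params" already exist on disk:

from mxnet.context import cpu

mod = load('mymodel', 10, load_optimizer_states=False, context=cpu())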
Example 10: resnest14

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def resnest14(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNeSt-14 model.

    Parameters
    ----------
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    dilated : bool, default False
        Whether to apply the dilation strategy to ResNeSt, yielding a stride-8 model.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    model = ResNeSt(Bottleneck, [1, 1, 1, 1],
                    radix=2, cardinality=1, bottleneck_width=64,
                    deep_stem=True, avg_down=True,
                    avd=True, avd_first=False,
                    use_splat=True, dropblock_prob=0.0,
                    name_prefix='resnest_', **kwargs)
    if pretrained:
        from .model_store import get_model_file
        model.load_parameters(get_model_file('resnest14', root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
Example 11: resnest26

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def resnest26(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNeSt-26 model.

    Parameters
    ----------
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    dilated : bool, default False
        Whether to apply the dilation strategy to ResNeSt, yielding a stride-8 model.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    model = ResNeSt(Bottleneck, [2, 2, 2, 2],
                    radix=2, cardinality=1, bottleneck_width=64,
                    deep_stem=True, avg_down=True,
                    avd=True, avd_first=False,
                    use_splat=True, dropblock_prob=0.1,
                    name_prefix='resnest_', **kwargs)
    if pretrained:
        from .model_store import get_model_file
        model.load_parameters(get_model_file('resnest26', root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
Example 12: resnest50

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def resnest50(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNeSt-50 model.

    Parameters
    ----------
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    dilated : bool, default False
        Whether to apply the dilation strategy to ResNeSt, yielding a stride-8 model.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    model = ResNeSt(Bottleneck, [3, 4, 6, 3],
                    radix=2, cardinality=1, bottleneck_width=64,
                    deep_stem=True, avg_down=True,
                    avd=True, avd_first=False,
                    use_splat=True, dropblock_prob=0.1,
                    name_prefix='resnest_', **kwargs)
    if pretrained:
        from .model_store import get_model_file
        model.load_parameters(get_model_file('resnest50', root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
Example 13: resnest101

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def resnest101(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNeSt-101 model.

    Parameters
    ----------
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    dilated : bool, default False
        Whether to apply the dilation strategy to ResNeSt, yielding a stride-8 model.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    model = ResNeSt(Bottleneck, [3, 4, 23, 3],
                    radix=2, cardinality=1, bottleneck_width=64,
                    deep_stem=True, avg_down=True, stem_width=64,
                    avd=True, avd_first=False, use_splat=True, dropblock_prob=0.1,
                    name_prefix='resnest_', **kwargs)
    if pretrained:
        from .model_store import get_model_file
        model.load_parameters(get_model_file('resnest101', root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
Example 14: resnest200

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def resnest200(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNeSt-200 model.

    Parameters
    ----------
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    dilated : bool, default False
        Whether to apply the dilation strategy to ResNeSt, yielding a stride-8 model.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`).
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    model = ResNeSt(Bottleneck, [3, 24, 36, 3], deep_stem=True, avg_down=True, stem_width=64,
                    avd=True, use_splat=True, dropblock_prob=0.1, final_drop=0.2,
                    name_prefix='resnest_', **kwargs)
    if pretrained:
        from .model_store import get_model_file
        model.load_parameters(get_model_file('resnest200', root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    return model
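The five ResNeSt factories above share the same scaffolding and differ mainly in the per-stage block counts handed to ResNeSt (plus stem_width, dropblock_prob, and final_drop for the deeper variants). A minimal construction sketch; note that ctx only matters when pretrained=True, since that is where load_parameters places the weights:

from mxnet.context import cpu

net = resnest50(pretrained=False, ctx=cpu(0))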
Example 15: get_squeezenet

# Required import: from mxnet import context [as alias]
# Or: from mxnet.context import cpu [as alias]
def get_squeezenet(version, pretrained=False, ctx=cpu(),
                   root='~/.mxnet/models', **kwargs):
    r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
    and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
    SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.

    Parameters
    ----------
    version : str
        Version of SqueezeNet. Options are '1.0' and '1.1'.
    pretrained : bool or str
        A boolean value controls whether to load the default pretrained weights;
        a string value gives the hashtag of a specific version of pretrained weights.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    """
    net = SqueezeNet(version, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        net.load_parameters(get_model_file('squeezenet%s' % version,
                                           tag=pretrained, root=root), ctx=ctx)
        from ..data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        net.synset = attrib.synset
        net.classes = attrib.classes
        net.classes_long = attrib.classes_long
    return net
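A hypothetical call to the factory above, building SqueezeNet 1.1 on the CPU without pretrained weights:

from mxnet.context import cpu

net = get_squeezenet('1.1', pretrained=False, ctx=cpu())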