This article collects typical usage examples of mxnet.gluon.ParameterDict in Python. If you are wondering what gluon.ParameterDict does or how to use it, the curated code examples below may help; you can also look further into other usage examples from the mxnet.gluon module.
The following shows 14 code examples of gluon.ParameterDict, sorted by popularity by default.
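Before diving into the examples, here is a minimal sketch (not taken from any of the examples below) of the typical gluon.ParameterDict workflow in MXNet 1.x: create a dict with a name prefix, register parameters with get, initialize them, and round-trip them through a file. The 'demo_' prefix, the (3, 4) shape, and the file name are arbitrary choices for illustration.

import mxnet as mx
from mxnet import gluon

# Create a ParameterDict whose parameters share a common name prefix.
params = gluon.ParameterDict('demo_')
# Register a parameter; its full name becomes 'demo_weight'.
params.get('weight', shape=(3, 4), init=mx.init.Xavier())
# Allocate and initialize the parameter data on the CPU.
params.initialize(ctx=mx.cpu())
print(params['demo_weight'].data())
# Save to disk and load back into the same dict.
params.save('demo.params')
params.load('demo.params', ctx=mx.cpu())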

Example 1: model_fn

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def model_fn(model_dir):
    """Load the gluon model. Called once when the hosting service starts.

    Args:
        model_dir: The directory where model files are stored.

    Returns:
        a model (in this case a Gluon network)
    """
    symbol = mx.sym.load("%s/model.json" % model_dir)
    outputs = mx.symbol.softmax(data=symbol, name="softmax_label")
    inputs = mx.sym.var("data")
    param_dict = gluon.ParameterDict("model_")
    net = gluon.SymbolBlock(outputs, inputs, param_dict)
    net.load_params("%s/model.params" % model_dir, ctx=mx.cpu())
    return net
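
A hosting container would call model_fn once at startup and then run inference with the returned SymbolBlock. A hedged usage sketch follows; the model directory path and the (1, 784) input shape are assumptions for illustration and depend on the actual trained model.

# Hypothetical invocation (path and input shape are assumptions).
net = model_fn("/opt/ml/model")
probabilities = net(mx.nd.ones((1, 784)))
print(probabilities)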

Example 2: test_paramdict

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def test_paramdict():
    ctx = mx.cpu(1)
    params0 = gluon.ParameterDict('net_')
    params0.get('w0', shape=(10, 10))
    params0.get('w1', shape=(10, 10), stype='row_sparse')
    all_row_ids = mx.nd.arange(0, 10, ctx=ctx)
    # check param names
    assert list(params0.keys()) == ['net_w0', 'net_w1']
    params0.initialize(ctx=ctx)
    trainer0 = mx.gluon.Trainer(params0, 'sgd')
    prev_w0 = params0.get('w0').data(ctx)
    prev_w1 = params0.get('w1').row_sparse_data(all_row_ids)
    # save params
    params0.save('test_paramdict.params')

    # load params
    params1 = gluon.ParameterDict('net_')
    params1.get('w0', shape=(10, 10))
    params1.get('w1', shape=(10, 10), stype='row_sparse')
    params1.load('test_paramdict.params', ctx)
    trainer1 = mx.gluon.Trainer(params1, 'sgd')
    # compare the values before and after save/load
    cur_w0 = params1.get('w0').data(ctx)
    cur_w1 = params1.get('w1').row_sparse_data(all_row_ids)
    mx.test_utils.assert_almost_equal(prev_w0.asnumpy(), cur_w0.asnumpy())
    mx.test_utils.assert_almost_equal(prev_w1.asnumpy(), cur_w1.asnumpy())

    # create a new param dict with dense params, and load from the checkpoint
    # of sparse & dense params
    params2 = gluon.ParameterDict('net_')
    params2.get('w0', shape=(10, 10))
    params2.get('w1', shape=(10, 10))
    params2.load('test_paramdict.params', ctx)
    # compare the values before and after save/load
    cur_w0 = params2.get('w0').data(ctx)
    cur_w1 = params2.get('w1').data(ctx)
    mx.test_utils.assert_almost_equal(prev_w0.asnumpy(), cur_w0.asnumpy())
    mx.test_utils.assert_almost_equal(prev_w1.asnumpy(), cur_w1.asnumpy())

Example 3: test_sparse_hybrid_block

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def test_sparse_hybrid_block():
    params = gluon.ParameterDict('net_')
    params.get('weight', shape=(5, 5), stype='row_sparse', dtype='float32')
    params.get('bias', shape=(5,), dtype='float32')
    net = gluon.nn.Dense(5, params=params)
    net.initialize()
    x = mx.nd.ones((2, 5))
    # an exception is expected when forwarding a HybridBlock w/ sparse param
    y = net(x)
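
Given the comment above, a standalone version of this test would typically wrap the forward pass in an exception assertion. A hedged sketch using pytest follows; pytest is an assumption here, since the original test harness is not shown in the snippet.

# Assumes pytest is available; asserts that the forward pass raises.
import pytest
with pytest.raises(Exception):
    net(mx.nd.ones((2, 5)))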

Example 4: test_trainer_reset_kv

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def test_trainer_reset_kv():
    def check_trainer_reset_kv(kv):
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10,), lr_mult=1.0)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        params.save('test_trainer_reset_kv.params')
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        # load would reset kvstore
        mx.nd.waitall()
        params.load('test_trainer_reset_kv.params')
        assert trainer._kvstore is None
        assert trainer._kv_initialized is False
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        # the updated parameter should be based on the loaded checkpoint
        assert (x.data(mx.cpu()) == -0.2).asnumpy().all()

    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_reset_kv(kv)

Example 5: test_trainer_sparse_kv

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def test_trainer_sparse_kv():
    def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv):
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10, 1), lr_mult=1.0, stype=stype, grad_stype=grad_stype)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
        ws = x.list_data() if stype == 'default' else x.list_row_sparse_data(all_rows)
        with mx.autograd.record():
            for w in ws:
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        assert trainer._kv_initialized
        assert trainer._update_on_kvstore is update_on_kv
        # after one SGD step from zero initialization the parameter should be -0.2
        mx.nd.waitall()
        updated_w = x.data(mx.cpu(0)) if stype == 'default' else x.row_sparse_data(all_rows)
        assert (updated_w == -0.2).asnumpy().all()

    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_sparse_kv(kv, 'default', 'default', True)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', False)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', True)

Example 6: __init__

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def __init__(self, constants=None, dtype=None, context=None):
    self.dtype = dtype if dtype is not None else get_default_dtype()
    self.mxnet_context = context if context is not None else get_default_device()
    self._constants = {}
    self._var_ties = {}
    if constants is not None:
        constant_uuids = {
            (k.uuid if isinstance(k, ModelComponent) else k): v
            for k, v in constants.items()}
        self._constants.update(constant_uuids)
    self._params = ParameterDict()

Example 7: initialize_params

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def initialize_params(self, graphs, observed_uuid):
    """
    :param graphs: a list of graphs in which the parameters will be optimized.
    :type graphs: a list of FactorGraph
    :param observed_uuid: Parameter Variables that are passed in directly as data, not to be inferred.
    :type observed_uuid: list, set
    """
    if self._params is not None:
        warnings.warn("InferenceParameters has already been initialized. The existing one will be overwritten.")

    self._params = ParameterDict()
    for g in graphs:
        for var in g.get_constants():
            self._constants[var.uuid] = var.constant
        excluded = set(self._constants.keys()).union(observed_uuid)
        for var in g.get_parameters(excluded=excluded):
            var_shape = realize_shape(var.shape, self._constants)
            init = initializer.Constant(var.initial_value_before_transformation) \
                if var.initial_value is not None else None
            self._params.get(name=var.uuid, shape=var_shape,
                             dtype=self.dtype,
                             allow_deferred_init=True, init=init)
        for m in g.modules.values():
            m.initialize_hidden_parameters(self._params, excluded, self._constants)

    self._params.initialize(ctx=self.mxnet_context)

Example 8: initialize_hidden_parameters

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def initialize_hidden_parameters(self, param_dict=None, excluded=None,
                                 constants=None):
    """
    Initialize all the hidden parameters.

    :param param_dict: the MXNet ParameterDict for parameter initialization
    :type param_dict: MXNet ParameterDict
    :param excluded: the set of variables that are excluded from initialization
    :type excluded: set(str(UUID))
    :param constants: the constants discovered during initialization, to be used for shape inference
    :type constants: {str(UUID): float or int}
    """
    if param_dict is None:
        param_dict = ParameterDict()
    if excluded is None:
        excluded = set()
    if constants is None:
        constants = {}
    for g in [self._module_graph] + self._extra_graphs:
        for var in g.get_parameters(
                excluded=set([v.uuid for _, v in self.inputs] +
                             [v.uuid for _, v in self.outputs]
                             ).union(constants.keys()).union(excluded),
                include_inherited=True):
            var_shape = realize_shape(var.shape, constants)
            init = initializer.Constant(var.initial_value_before_transformation) \
                if var.initial_value is not None else None
            param_dict.get(name=var.uuid, shape=var_shape, dtype=self.dtype,
                           allow_deferred_init=True, init=init)
    return param_dict

Example 9: _override_block_parameters

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def _override_block_parameters(self, input_kws):
    """
    When a probabilistic distribution is defined for the parameters of a Gluon
    block (in a ParameterDict), special treatment is necessary, because
    otherwise these parameters would be directly exposed to a gradient
    optimizer as free parameters.

    For each parameter of the Gluon block with a probabilistic distribution,
    this method dynamically sets its value to the outcome of the upstream
    computation and ensures that the correct gradient can be estimated via
    automatic differentiation.

    :param input_kws: the dict of inputs to the functions. The keys in the dict
        should match the names of the inputs specified in the inputs of
        FunctionEvaluation.
    :type input_kws: {variable name: MXNet NDArray or MXNet Symbol}
    """
    for bn in self.parameter_names:
        if bn in input_kws:
            val = input_kws[bn]
            param = self._gluon_block.collect_params()[bn]
            if isinstance(val, mx.ndarray.ndarray.NDArray):
                ctx = val.context
                ctx_list = param._ctx_map[ctx.device_typeid & 1]
                if ctx.device_id >= len(ctx_list) or ctx_list[ctx.device_id] is None:
                    raise Exception
                dev_id = ctx_list[ctx.device_id]
                param._data[dev_id] = val
            else:
                param._var = val

Example 10: get_weights

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def get_weights(self) -> gluon.ParameterDict:
    """
    :return: a ParameterDict containing all network weights
    """
    return self.model.collect_params()

Example 11: set_weights

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def set_weights(self, weights: gluon.ParameterDict, new_rate: float = 1.0) -> None:
    """
    Sets the network weights from the given ParameterDict.

    :param weights: a ParameterDict holding the new weight values
    :param new_rate: ratio for mixing new and old weight values:
        val = new_rate * weights + (1 - new_rate) * old_weights
    """
    old_weights = self.model.collect_params()
    for name, p in weights.items():
        name = name[len(weights.prefix):]               # strip the source prefix
        old_p = old_weights[old_weights.prefix + name]  # add the target prefix
        old_p.set_data(new_rate * p._reduce() + (1 - new_rate) * old_p._reduce())
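
When new_rate is less than 1, set_weights performs a soft update that blends the incoming weights with the current ones, which is how target networks are commonly tracked in reinforcement learning. A hedged usage sketch follows, where online_net and target_net are hypothetical instances of the class above.

# Hypothetical instances; moves the target weights 5% of the way toward the online weights.
target_net.set_weights(online_net.get_weights(), new_rate=0.05)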

Example 12: __init__

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def __init__(self, name: str, param_dict: gluon.ParameterDict):
    self._name = name
    self._param_dict = param_dict

Example 13: test_paramdict

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def test_paramdict():
    params = gluon.ParameterDict('net_')
    params.get('weight', shape=(10, 10))
    assert list(params.keys()) == ['net_weight']
    params.initialize(ctx=mx.cpu())
    params.save('test.params')
    params.load('test.params', mx.cpu())

Example 14: load_parameters

# Required import: from mxnet import gluon
# Or: from mxnet.gluon import ParameterDict
def load_parameters(uuid_map=None,
                    mxnet_parameters=None,
                    variable_constants=None,
                    mxnet_constants=None,
                    context=None, dtype=None,
                    current_params=None):
    """
    Loads back a set of InferenceParameters from files.

    :param mxnet_parameters: These are the parameters of the previous inference
        algorithm. These are in a {uuid: mx.nd.array} mapping.
    :type mxnet_parameters: Dict of {uuid: mx.nd.array}
    :param mxnet_constants: These are the constants in mxnet format from the
        previous inference algorithm. These are in a {uuid: mx.nd.array} mapping.
    :type mxnet_constants: Dict of {uuid: mx.nd.array}
    :param variable_constants: These are the constants in primitive format from
        the previous inference algorithm.
    :type variable_constants: dict of {uuid: constant primitive}
    """
    def with_uuid_map(item, uuid_map):
        if uuid_map is not None:
            return uuid_map[item]
        else:
            return item

    ip = InferenceParameters(context=context, dtype=dtype)

    mapped_params = {with_uuid_map(k, uuid_map): v
                     for k, v in mxnet_parameters.items()}

    new_paramdict = ParameterDict()
    if current_params is not None:
        new_paramdict.update(current_params)

    # Do this because we need to map the uuids to the new Model
    # before loading them into the ParamDict
    for name, mapped_param in mapped_params.items():
        new_paramdict[name]._load_init(mapped_param, ip.mxnet_context)
    ip._params = new_paramdict

    new_variable_constants = {with_uuid_map(k, uuid_map): v
                              for k, v in variable_constants.items()}
    new_mxnet_constants = {with_uuid_map(k, uuid_map): v
                           for k, v in mxnet_constants.items()}

    ip._constants = {}
    ip._constants.update(new_variable_constants)
    ip._constants.update(new_mxnet_constants)
    return ip