This page collects typical usage examples of mxnet.symbol in Python. If you are wondering how mxnet.symbol is used in practice, the curated examples below should help; you can also explore the rest of the mxnet module for related usage examples.
The following presents 15 code examples of mxnet.symbol, ordered by popularity.
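Before the examples, a minimal sketch of what mxnet.symbol does: a symbol declares a computation graph, and concrete shapes and values are supplied only later at bind time. The names below are illustrative; the calls follow the MXNet 1.x Symbol API.

import mxnet as mx

data = mx.symbol.Variable('data')                                   # graph input
fc = mx.symbol.FullyConnected(data=data, name='fc', num_hidden=10)  # declares, does not compute
print(fc.list_arguments())  # ['data', 'fc_weight', 'fc_bias']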
Example 1: test_symbol_compose

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def test_symbol_compose():
    data = mx.symbol.Variable('data')
    net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
    net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
    assert net1.list_arguments() == ['data',
                                     'fc1_weight', 'fc1_bias',
                                     'fc2_weight', 'fc2_bias']
    # net2 is built without a data input; its free input 'fc3_data' is
    # filled in by calling net2 as a function below (symbol composition)
    net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
    net2 = mx.symbol.Activation(data=net2, act_type='relu')
    net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)
    composed = net2(fc3_data=net1, name='composed')
    # grouping the composed net with net1 yields a symbol with two outputs
    multi_out = mx.symbol.Group([composed, net1])
    assert len(multi_out.list_outputs()) == 2
    assert len(multi_out) == 2
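A hedged follow-up sketch (runnable after the body of test_symbol_compose above): since the composed graph has a single real input, 'data', its shapes can be inferred end to end. The input dimensions are hypothetical.

batch_size, in_dim = 32, 50  # hypothetical input shape
arg_shapes, out_shapes, aux_shapes = composed.infer_shape(data=(batch_size, in_dim))
print(out_shapes)  # the final FullyConnected has num_hidden=20, so [(32, 20)]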
Example 2: test_symbol_children

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def test_symbol_children():
    data = mx.symbol.Variable('data')
    oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
    net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
    # get_children() returns a symbol's direct inputs as a grouped symbol
    assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias']
    assert len(net1.get_children()) == 3
    assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias']
    assert len(net1.get_children().get_children()) == 3
    assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight']
    # a leaf variable has no children
    assert net1.get_children()['fc2_weight'].get_children() is None

    data = mx.sym.Variable('data')
    sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice')
    concat = mx.sym.Concat(*list(sliced))
    assert concat.get_children().list_outputs() == \
        ['slice_output0', 'slice_output1', 'slice_output2']
    assert sliced.get_children().list_outputs() == ['data']
Example 3: test_symbol_infer_shape_var

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def test_symbol_infer_shape_var():
    """Test specifying shape information when constructing a variable."""
    shape = (2, 3)
    a = mx.symbol.Variable('a', shape=shape)
    b = mx.symbol.Variable('b')
    c = mx.symbol.elemwise_add(a, b)
    # the shape attached to 'a' propagates to 'b' and to the output
    arg_shapes, out_shapes, aux_shapes = c.infer_shape()
    assert arg_shapes[0] == shape
    assert arg_shapes[1] == shape
    assert out_shapes[0] == shape

    # a shape passed to infer_shape overrides the one stored on the variable
    overwrite_shape = (5, 6)
    arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=overwrite_shape)
    assert arg_shapes[0] == overwrite_shape
    assert arg_shapes[1] == overwrite_shape
    assert out_shapes[0] == overwrite_shape
Example 4: test_zero_prop

# Required imports: import re; import mxnet as mx
# Alternatively: from mxnet import symbol

def test_zero_prop():
    data = mx.symbol.Variable('data')
    for i in range(10):
        data = data * data

    # full training plan: gradient buffers are allocated
    exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
    big = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))

    # grad_req='null' skips gradient allocation entirely
    exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256), grad_req='null')
    small1 = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))

    # stop_gradient inside the graph should save the same amount of memory
    data = mx.sym.stop_gradient(data)
    exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
    small2 = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))

    assert big > small2
    assert small1 == small2
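The regex probe above is repeated three times; a minimal helper sketch (the name allocated_mb is hypothetical, re and mx as imported above) makes the pattern reusable:

def allocated_mb(sym, shape, grad_req='write'):
    # bind the symbol and read the planner's memory report from debug_str()
    exe = sym.simple_bind(ctx=mx.cpu(), data=shape, grad_req=grad_req)
    return int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))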
Example 5: predict

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def predict(self, X):
    assert self.is_fitted
    ys = []
    n = X.shape[0]
    batch_size = min(n, 2**13)
    mod = mx.mod.Module(symbol=self.output, label_names=None)
    # at most two passes: the iterator is assumed to yield full batches only,
    # so a second pass sized to the remainder picks up the leftover rows
    # (e.g. n=10000 with batch_size=8192 leaves 10000 % 8192 = 1808)
    for _ in range(2):
        eval_iter = self._make_train_iter(
            X[len(ys):, :], y=None, batch_size=batch_size, shuffle=False)
        mod.bind(
            data_shapes=eval_iter.provide_data,
            label_shapes=None,
            for_training=False,
            force_rebind=True,
        )
        mod.set_params(*self.mod_params)
        ys.extend(mod.predict(eval_iter).asnumpy())
        batch_size = n % batch_size
        if batch_size == 0:
            break
    assert len(ys) == n
    return self._invert_target(ys)
Example 6: drop_layer_top

# Required imports: import logging; import mxnet as mx
# Alternatively: from mxnet import symbol

def drop_layer_top(self, num_layers_to_drop=1):
    """
    Remove layers from the output of the model.

    :param int num_layers_to_drop: Number of layers to remove from the model output.
    """
    network_symbol = self.symbol
    network = self._get_symbol_dict(network_symbol)
    self._assert_drop_layer_valid(num_layers_to_drop)
    self._assert_model_has_single_output(self._get_symbol_dict(network_symbol))
    layers_dropped = []
    last_layer = len(network[consts.NODES]) - 1
    for n in range(num_layers_to_drop):
        last_layer_inputs = self._get_names_of_inputs_to_layer(symbol_dict=network, node_idx=last_layer)
        self._assert_layer_drop_not_ambiguous(possible_layers_to_drop=last_layer_inputs, layer_drop_number=n)
        # after the ambiguity check there is exactly one input to descend into
        layers_dropped.append(network[consts.NODES][last_layer][consts.NAME])
        last_layer = last_layer_inputs[0]
        network_symbol = network_symbol.get_internals()[network[consts.NODES][last_layer][consts.NAME] + consts.OUTPUT]
    logging.info('{} deleted from model top'.format(', '.join(layers_dropped)))
    self.update_sym(network_symbol)
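A hedged usage sketch: the enclosing handler class is not shown above, so ModelHandler and the pretrained model are assumptions; only drop_layer_top itself comes from the example.

handler = ModelHandler(pretrained_model)      # hypothetical wrapper class
handler.drop_layer_top(num_layers_to_drop=2)  # e.g. strip softmax + last FC
print(handler.symbol.list_outputs())          # output of the new top layer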
Example 7: __init__

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def __init__(self,
             field_name: str,
             numeric_latent_dim: int = 100,
             numeric_hidden_layers: int = 1) -> None:
    super(NumericalFeaturizer, self).__init__(field_name, numeric_latent_dim)
    self.numeric_hidden_layers = int(numeric_hidden_layers)
    self.numeric_latent_dim = int(numeric_latent_dim)
    # self.prefix and self.input_symbol are provided by the parent class (not shown)
    with mx.name.Prefix(self.prefix):
        self.symbol = self.input_symbol
        # stack FullyConnected + relu blocks on top of the input symbol
        for _ in range(self.numeric_hidden_layers):
            symbol = mx.sym.FullyConnected(
                data=self.symbol,
                num_hidden=self.numeric_latent_dim
            )
            self.symbol = mx.symbol.Activation(data=symbol, act_type="relu")
Example 8: __make_numerical_loss

# Required imports: from typing import Any, Tuple; import mxnet as mx
# Alternatively: from mxnet import symbol

def __make_numerical_loss(latents: mx.symbol.Symbol,
                          label_field_name: str) -> Tuple[Any, Any]:
    """
    Generate output symbols for a univariate numeric loss.

    :param latents: latent symbol feeding the prediction head
    :param label_field_name: name of the label variable
    :return: mxnet symbols for predictions and loss
    """
    # generate prediction symbol
    pred = mx.sym.FullyConnected(
        data=latents,
        num_hidden=1,
        name="label_{}".format(label_field_name))
    target = mx.sym.Variable(label_field_name)
    # squared loss
    loss = mx.sym.sum((pred - target) ** 2.0)
    return pred, loss
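The returned loss is an ordinary symbol; to train on it in module-based MXNet it would typically be wrapped in mx.sym.MakeLoss. A minimal hedged sketch, with a hypothetical label field 'price' and the loss construction inlined rather than calling the name-mangled method above:

latents = mx.sym.Variable('latents')
pred = mx.sym.FullyConnected(data=latents, num_hidden=1, name='label_price')
target = mx.sym.Variable('price')
squared = mx.sym.sum((pred - target) ** 2.0)
# BlockGrad exposes predictions as an output without backpropagating through it
train_sym = mx.sym.Group([mx.sym.BlockGrad(pred), mx.sym.MakeLoss(squared)])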
Example 9: conv_act_layer

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def conv_act_layer(from_layer, name, num_filter, kernel=(1, 1), pad=(0, 0),
                   stride=(1, 1), act_type="relu", use_batchnorm=False):
    """
    Wrapper for a small Convolution group.

    Parameters
    ----------
    from_layer : mx.symbol
        layer to build on
    name : str
        base name of the new layers
    num_filter : int
        number of filters in the Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, e.g. 'relu'
    use_batchnorm : bool
        whether to use batch normalization

    Returns
    -------
    relu : mx.symbol
        the activation output of the group
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
                                 stride=stride, num_filter=num_filter,
                                 name="{}_conv".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type,
                                name="{}_{}".format(name, act_type))
    return relu
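A hedged usage sketch of the wrapper above; the stage names, filter counts, and input are hypothetical:

data = mx.symbol.Variable('data')
body = conv_act_layer(data, 'stage1', num_filter=32, kernel=(3, 3),
                      pad=(1, 1), use_batchnorm=True)
body = conv_act_layer(body, 'stage2', num_filter=64, kernel=(3, 3),
                      pad=(1, 1), stride=(2, 2))
# with use_batchnorm=True the group is conv -> batchnorm -> relu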
Example 10: legacy_conv_act_layer

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1, 1), pad=(0, 0),
                          stride=(1, 1), act_type="relu", use_batchnorm=False):
    """
    Wrapper for a small Convolution group (legacy naming scheme).

    Parameters
    ----------
    from_layer : mx.symbol
        layer to build on
    name : str
        base name of the new layers
    num_filter : int
        number of filters in the Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, e.g. 'relu'
    use_batchnorm : bool
        whether to use batch normalization (currently unsupported)

    Returns
    -------
    (conv, relu) : mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    # the bias gets a doubled learning rate via the __lr_mult__ attribute
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
                              init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad,
                                 stride=stride, num_filter=num_filter,
                                 name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type,
                                name="{}{}".format(act_type, name))
    if use_batchnorm:
        # unreachable given the assert above; kept for parity with the original
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
Example 11: test_symbol_bool

# Required import: import mxnet as mx; assertRaises comes from
# mxnet.test_utils and NotImplementedForSymbol from mxnet.base

def test_symbol_bool():
    # truth-testing a symbol is deliberately unsupported
    x = mx.symbol.Variable('x')
    assertRaises(NotImplementedForSymbol, bool, x)
Example 12: test_symbol_copy

# Required imports: import copy; import mxnet as mx
# Alternatively: from mxnet import symbol

def test_symbol_copy():
    data = mx.symbol.Variable('data')
    data_2 = copy.deepcopy(data)
    data_3 = copy.copy(data)
    # both shallow and deep copies serialize to identical JSON
    assert data.tojson() == data_2.tojson()
    assert data.tojson() == data_3.tojson()
Example 13: test_symbol_saveload

# Required imports: import os; import mxnet as mx
# (models.mlp2 comes from the test suite's shared models module)

def test_symbol_saveload():
    sym = models.mlp2()
    fname = 'tmp_sym.json'
    sym.save(fname)
    data2 = mx.symbol.load(fname)
    # the save/load round trip preserves node order, so the dumps match exactly
    assert sym.tojson() == data2.tojson()
    os.remove(fname)
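A hedged alternative sketch: the same round trip can be done in memory via the JSON string form, using mx.symbol.load_json (MXNet 1.x), with no temporary file:

sym2 = mx.symbol.load_json(sym.tojson())
assert sym.tojson() == sym2.tojson()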
Example 14: test_symbol_infer_type

# Required imports: import numpy as np; import mxnet as mx
# Alternatively: from mxnet import symbol

def test_symbol_infer_type():
    data = mx.symbol.Variable('data')
    f32data = mx.symbol.Cast(data=data, dtype='float32')
    fc1 = mx.symbol.FullyConnected(data=f32data, name='fc1', num_hidden=128)
    mlp = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
    arg, out, aux = mlp.infer_type(data=np.float16)
    # arguments are [data, fc1_weight, fc1_bias, softmax_label]: only 'data'
    # stays float16, since the Cast pins everything downstream to float32
    assert arg == [np.float16, np.float32, np.float32, np.float32]
    assert out == [np.float32]
    assert aux == []
Example 15: test_symbol_infer_shape

# Required import: import mxnet as mx
# Alternatively: from mxnet import symbol

def test_symbol_infer_shape():
    num_hidden = 128
    num_dim = 64
    num_sample = 10

    data = mx.symbol.Variable('data')
    prev = mx.symbol.Variable('prevstate')
    x2h = mx.symbol.FullyConnected(data=data, name='x2h', num_hidden=num_hidden)
    h2h = mx.symbol.FullyConnected(data=prev, name='h2h', num_hidden=num_hidden)
    out = mx.symbol.Activation(data=mx.sym.elemwise_add(x2h, h2h), name='out', act_type='relu')

    # shape inference will fail because no information is available for h2h
    ret = out.infer_shape(data=(num_sample, num_dim))
    assert ret == (None, None, None)

    # partial inference resolves what it can and returns () for the rest
    arg, out_shapes, aux_shapes = out.infer_shape_partial(data=(num_sample, num_dim))
    arg_shapes = dict(zip(out.list_arguments(), arg))
    assert arg_shapes['data'] == (num_sample, num_dim)
    assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
    assert arg_shapes['h2h_weight'] == ()

    # now we can do full shape inference
    state_shape = out_shapes[0]
    arg, out_shapes, aux_shapes = out.infer_shape(data=(num_sample, num_dim), prevstate=state_shape)
    arg_shapes = dict(zip(out.list_arguments(), arg))
    assert arg_shapes['data'] == (num_sample, num_dim)
    assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
    assert arg_shapes['h2h_weight'] == (num_hidden, num_hidden)