This article collects typical usage examples of the Python function mxnet.test_utils.assert_almost_equal. If you are unsure what assert_almost_equal does or how to call it, the hand-picked code examples below should help.
The following presents 15 code examples of the assert_almost_equal function, sorted by popularity by default.
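Before the examples, here is a minimal, self-contained sketch of how assert_almost_equal itself is typically called. The tolerance values are illustrative assumptions chosen for this sketch, not values taken from the examples that follow:

import numpy as np
import mxnet as mx
from mxnet.test_utils import assert_almost_equal

# assert_almost_equal compares two arrays element-wise and raises an
# AssertionError when any element differs by more than the relative
# (rtol) and absolute (atol) tolerances.
a = mx.nd.array([1.0, 2.0, 3.0]).asnumpy()
b = a + 1e-7  # differs only by a tiny amount, well within tolerance
assert_almost_equal(a, b, rtol=1e-5, atol=1e-6)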
Example 1: check_quantized_pooling
def check_quantized_pooling(data_shape, kernel, pool_type, pad, stride, global_pool):
    with mx.Context('gpu', 0):
        data = mx.sym.Variable(name='data', shape=data_shape, dtype='float32')
        pooling_fp32 = mx.sym.Pooling(data=data, kernel=kernel, pad=pad, stride=stride,
                                      pool_type=pool_type, global_pool=global_pool, cudnn_off=False)
        arg_shapes, _, _ = pooling_fp32.infer_shape(data=data_shape)
        arg_names = pooling_fp32.list_arguments()
        pooling_fp32_exe = pooling_fp32.simple_bind(ctx=mx.current_context(), grad_req='null')
        pooling_fp32_exe.arg_dict[arg_names[0]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                          shape=data_shape).astype('int32')
        output = pooling_fp32_exe.forward()[0]

        qdata = mx.sym.Variable(name='qdata', shape=data_shape, dtype='int8')
        min_data = mx.sym.Variable(name='min_data')
        max_data = mx.sym.Variable(name='max_data')
        quantized_pooling = mx.sym.contrib.quantized_pooling(data=qdata, min_data=min_data,
                                                             max_data=max_data, kernel=kernel,
                                                             pad=pad, stride=stride, pool_type=pool_type,
                                                             global_pool=global_pool)
        pooling_int8_exe = quantized_pooling.simple_bind(ctx=mx.current_context(), grad_req='null')
        qarg_names = quantized_pooling.list_arguments()
        pooling_int8_exe.arg_dict[qarg_names[0]][:] = pooling_fp32_exe.arg_dict[arg_names[0]].astype('int8')
        quantized_range = 127.0
        pooling_int8_exe.arg_dict[qarg_names[1]][:] = -quantized_range
        pooling_int8_exe.arg_dict[qarg_names[2]][:] = quantized_range
        qoutput, min_range, max_range = pooling_int8_exe.forward()

        if pool_type == 'max':
            assert_almost_equal(output.asnumpy(), qoutput.asnumpy())
        elif pool_type == 'avg':  # for avg pooling, fp32 and int8 may be different due to rounding errors
            diff = mx.nd.abs(output - qoutput.astype(output.dtype))
            cond = mx.nd.lesser(2, diff).sum().asscalar()
            assert cond == 0
Example 2: test_gnmt_encoder
def test_gnmt_encoder():
    ctx = mx.Context.default_ctx
    for cell_type in ["lstm", "gru", "relu_rnn", "tanh_rnn"]:
        for num_layers, num_bi_layers in [(2, 1), (3, 0)]:
            for use_residual in [False, True]:
                encoder = GNMTEncoder(cell_type=cell_type, num_layers=num_layers,
                                      num_bi_layers=num_bi_layers, hidden_size=8,
                                      dropout=0.0, use_residual=use_residual,
                                      prefix='gnmt_encoder_')
                encoder.initialize(ctx=ctx)
                encoder.hybridize()
                for batch_size in [4]:
                    for seq_length in [5, 10]:
                        inputs_nd = mx.nd.random.normal(0, 1, shape=(batch_size, seq_length, 4), ctx=ctx)
                        valid_length_nd = mx.nd.array(np.random.randint(1, seq_length,
                                                                        size=(batch_size,)), ctx=ctx)
                        encoder_outputs, _ = encoder(inputs_nd, valid_length=valid_length_nd)
                        valid_length_npy = valid_length_nd.asnumpy()
                        rnn_output = encoder_outputs[0].asnumpy()
                        for i in range(batch_size):
                            if valid_length_npy[i] < seq_length - 1:
                                padded_out = rnn_output[i, int(valid_length_npy[i]):, :]
                                assert_almost_equal(padded_out, np.zeros_like(padded_out), 1E-6, 1E-6)
                        assert(encoder_outputs[0].shape == (batch_size, seq_length, 8))
                        assert(len(encoder_outputs[1]) == num_layers)
Example 3: test_activations
def test_activations():
    point_to_validate = mx.nd.array([-0.1, 0.1] * 3)

    swish = mx.gluon.nn.Swish()
    def swish_test(x):
        return x * mx.nd.sigmoid(x)

    for test_point, ref_point in zip(swish_test(point_to_validate), swish(point_to_validate)):
        assert test_point == ref_point

    elu = mx.gluon.nn.ELU()
    def elu_test(x):
        def elu(x):
            return 1.0 * (mx.nd.exp(x) - 1) if x < 0 else x
        return [elu(x_i) for x_i in x]

    for test_point, ref_point in zip(elu_test(point_to_validate), elu(point_to_validate)):
        assert test_point == ref_point

    selu = mx.gluon.nn.SELU()
    def selu_test(x):
        def selu(x):
            scale, alpha = 1.0507009873554804934193349852946, 1.6732632423543772848170429916717
            # SELU is scale * x for x >= 0 and scale * alpha * (exp(x) - 1) otherwise
            return scale * x if x >= 0 else scale * alpha * (mx.nd.exp(x) - 1)
        return [selu(x_i) for x_i in x]

    for test_point, ref_point in zip(selu_test(point_to_validate), selu(point_to_validate)):
        assert test_point == ref_point

    prelu = mx.gluon.nn.PReLU()
    prelu.initialize()
    x = point_to_validate.reshape((1, 3, 2))
    assert_almost_equal(prelu(x).asnumpy(), mx.nd.where(x >= 0, x, 0.25 * x).asnumpy())
Example 4: check_hybrid_static_memory
def check_hybrid_static_memory(**kwargs):
    x = mx.nd.random.uniform(shape=(2, 3, 32, 32))
    x.attach_grad()

    net1 = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, prefix='net_', ctx=mx.context.current_context())
    net2 = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, prefix='net_', ctx=mx.context.current_context())
    net2.hybridize(**kwargs)
    net1(x)
    net2(x)

    def test(net, x):
        with mx.autograd.record():
            y = net(x) + net(x)
            y.backward()
        grads = {k: v.grad() for k, v in net.collect_params().items() if v.grad_req != 'null'}
        return y, grads

    y1, grads1 = test(net1, x)
    y2, grads2 = test(net2, x)
    assert_almost_equal(y1.asnumpy(), y2.asnumpy(), rtol=1e-3, atol=1e-5)
    for key in grads1:
        assert_almost_equal(grads1[key].asnumpy(), grads2[key].asnumpy(), rtol=1e-3, atol=1e-5)
Example 5: pull_init_test
def pull_init_test(kv):
    # checks that compression is not applied to init of key
    # (`shapes`, `nworker`, and `gc_init_test_key` are globals defined by
    # the enclosing test script)
    out = [mx.nd.zeros(shapes[0], mx.gpu(g)) for g in range(nworker)]
    kv.pull(gc_init_test_key, out=out)
    exp = np.ones_like(out[0].asnumpy())
    for o in out:
        assert_almost_equal(o.asnumpy(), exp)
Example 6: check_rsp_pull
def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
    count = len(ctxs)
    num_rows = shape[0]
    row_ids = []
    all_row_ids = np.arange(num_rows)
    vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
    if is_same_rowid:
        row_id = np.random.randint(num_rows, size=num_rows)
        row_ids = [mx.nd.array(row_id)] * count
    elif use_slice:
        total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
        row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
    else:
        for i in range(count):
            row_id = np.random.randint(num_rows, size=num_rows)
            row_ids.append(mx.nd.array(row_id))
    row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
    vals_to_pull = vals[0] if len(vals) == 1 else vals

    kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
    for val, row_id in zip(vals, row_ids):
        retained = val.asnumpy()
        excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
        for row in range(num_rows):
            expected_val = np.zeros_like(retained[row])
            expected_val += 0 if row in excluded_row_ids else 2
            assert_almost_equal(retained[row], expected_val)

    if sparse_pull is True:
        kv.pull('e', out=vals_to_pull, ignore_sparse=False)
        for val in vals:
            retained = val.asnumpy()
            expected_val = np.zeros_like(retained)
            expected_val[:] = 2
            assert_almost_equal(retained, expected_val)
Example 7: check_compr_random
def check_compr_random(kv, threshold, nworker):
    # set a seed so all workers generate same data. knowing this helps
    # calculate expected value after pull
    mx.random.seed(123)
    rnd.seed(123)
    nrepeat = 5
    compr_random_keys_shapes = [('2121', shape), ('212221', irregular_shape), ('21221', big_shape)]
    # use new keys so residual is 0 for calculation of expected
    for k, s in compr_random_keys_shapes:
        kv.init(k, mx.nd.zeros(s))
    for k, s in compr_random_keys_shapes:
        curr_residual = np.zeros(s)
        for l in range(nrepeat):
            orig_val = mx.nd.zeros(s)
            kv.pull(k, orig_val)
            grad = mx.nd.array(rnd.rand(s[0], s[1]))
            # creates a copy because push changes grad because of assignment
            grad_cpy = mx.nd.array(grad)
            kv.push(k, grad)
            val = mx.nd.zeros(s)
            kv.pull(k, val)
            diff = val - orig_val
            # compute expected by using simulation of operator
            compr, curr_residual, decompr = compute_expected_2bit_quantization(grad_cpy, curr_residual, threshold)
            decompr *= nworker * rate
            assert_almost_equal(diff.asnumpy(), decompr)
Example 8: test_req
def test_req():
    data = mx.nd.random.uniform(shape=(1, 3, 224, 224))
    label = mx.nd.random.uniform(shape=(1))
    label[:] = 1
    loss = gluon.loss.SoftmaxCrossEntropyLoss()

    net = nn.HybridSequential()
    net1 = nn.HybridSequential()
    net1.add(nn.Dense(4))
    net2 = nn.HybridSequential()
    net2.add(nn.Dense(3))
    net2.add(nn.Dense(2))
    net.add(net1)
    net.add(net2)
    net.initialize()
    net.hybridize()

    for v in net.collect_params().values():
        v.grad_req = 'add'

    net.collect_params().zero_grad()
    with mx.autograd.record():
        pred = net(data)
        l = loss(pred, label)
        l.backward()
        grad = net[0][0].weight.grad().mean().asnumpy()
        # run twice to check req = add
        pred = net(data)
        l = loss(pred, label)
        l.backward()

    grad_double = net[0][0].weight.grad().mean().asnumpy()
    assert_almost_equal(grad * 2, grad_double)
Example 9: check_with_uniform
def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
    """check function consistency with uniform random numbers"""
    if isinstance(arg_shapes, int):
        assert dim
        shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
        arg_shapes = [shape] * arg_shapes
    for dtype in type_list:
        ndarray_arg = []
        numpy_arg = []
        for s in arg_shapes:
            npy = np.random.uniform(rmin, 10, s).astype(dtype)
            narr = mx.nd.array(npy, dtype=dtype)
            ndarray_arg.append(narr)
            numpy_arg.append(npy)
        out1 = uf(*ndarray_arg)
        if npuf is None:
            out2 = uf(*numpy_arg).astype(dtype)
        else:
            out2 = npuf(*numpy_arg).astype(dtype)
        assert out1.shape == out2.shape
        if isinstance(out1, mx.nd.NDArray):
            out1 = out1.asnumpy()
        if dtype == np.float16:
            assert_almost_equal(out1, out2, rtol=2e-3)
        else:
            assert_almost_equal(out1, out2)
Example 10: check_quantized_conv
def check_quantized_conv(data_shape, kernel, num_filter, pad, stride, no_bias):
    with mx.Context('gpu', 0):
        # run fp32 conv
        data = mx.sym.Variable(name='data', shape=data_shape, dtype='float32')
        conv2d = mx.sym.Convolution(data=data, kernel=kernel, num_filter=num_filter, pad=pad, stride=stride,
                                    no_bias=no_bias, cudnn_off=False, name='conv2d')
        arg_shapes, _, _ = conv2d.infer_shape(data=data_shape)
        arg_names = conv2d.list_arguments()
        conv_exe_fp32 = conv2d.simple_bind(ctx=mx.current_context(), grad_req='null')
        conv_exe_fp32.arg_dict[arg_names[0]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                       shape=data_shape).astype('int32')
        conv_exe_fp32.arg_dict[arg_names[1]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                       shape=arg_shapes[1]).astype('int32')
        if not no_bias:
            conv_exe_fp32.arg_dict[arg_names[2]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                           shape=arg_shapes[2]).astype('int32')
        output = conv_exe_fp32.forward()[0]

        # run quantized conv
        qdata = mx.sym.Variable(name='qdata', shape=data_shape, dtype='int8')
        qweight = mx.sym.Variable(name='qweight', dtype='int8')
        min_data = mx.sym.Variable(name='min_data')
        max_data = mx.sym.Variable(name='max_data')
        min_weight = mx.sym.Variable(name='min_weight')
        max_weight = mx.sym.Variable(name='max_weight')
        quantized_conv2d = mx.sym.contrib.quantized_conv(data=qdata, weight=qweight, min_data=min_data,
                                                         max_data=max_data, min_weight=min_weight,
                                                         max_weight=max_weight, kernel=kernel,
                                                         num_filter=num_filter, pad=pad, stride=stride,
                                                         no_bias=no_bias)
        qarg_names = quantized_conv2d.list_arguments()
        type_dict = None
        if not no_bias:
            type_dict = {qarg_names[2]: 'int8'}
        conv_exe_int8 = quantized_conv2d.simple_bind(ctx=mx.current_context(), type_dict=type_dict, grad_req='null')
        conv_exe_int8.arg_dict[qarg_names[0]][:] = conv_exe_fp32.arg_dict[arg_names[0]].astype('int8')
        conv_exe_int8.arg_dict[qarg_names[1]][:] = conv_exe_fp32.arg_dict[arg_names[1]].astype('int8')
        quantized_range = 127.0
        if no_bias:
            conv_exe_int8.arg_dict[qarg_names[2]][:] = -quantized_range
            conv_exe_int8.arg_dict[qarg_names[3]][:] = quantized_range
            conv_exe_int8.arg_dict[qarg_names[4]][:] = -quantized_range
            conv_exe_int8.arg_dict[qarg_names[5]][:] = quantized_range
        else:
            conv_exe_int8.arg_dict[qarg_names[2]][:] = conv_exe_fp32.arg_dict[arg_names[2]].astype('int8')
            conv_exe_int8.arg_dict[qarg_names[3]][:] = -quantized_range
            conv_exe_int8.arg_dict[qarg_names[4]][:] = quantized_range
            conv_exe_int8.arg_dict[qarg_names[5]][:] = -quantized_range
            conv_exe_int8.arg_dict[qarg_names[6]][:] = quantized_range
            conv_exe_int8.arg_dict[qarg_names[7]][:] = -quantized_range
            conv_exe_int8.arg_dict[qarg_names[8]][:] = quantized_range
        qoutput, min_range, max_range = conv_exe_int8.forward()

        if no_bias:
            assert_almost_equal(output.asnumpy(), qoutput.asnumpy())
        else:
            # with adding bias, accuracy loss should not be greater than one
            diff = mx.nd.abs(output - qoutput.astype(output.dtype))
            cond = mx.nd.lesser(2, diff).sum().asscalar()
            assert cond == 0
Example 11: test_smooth_distribution
def test_smooth_distribution():
    assert_exception(lambda: mx.contrib.quant._smooth_distribution(np.zeros((2,)), eps=1e-3), ValueError)
    dirac_delta = np.zeros((5,))
    dirac_delta[2] = 1
    smooth_dirac_delta = dirac_delta.copy()
    smooth_dirac_delta += 1e-3
    smooth_dirac_delta[2] -= 5e-3
    assert_almost_equal(mx.contrib.quant._smooth_distribution(dirac_delta, eps=1e-3), smooth_dirac_delta)
Example 12: test_bce_equal_ce2
def test_bce_equal_ce2():
    N = 100
    loss1 = gluon.loss.SigmoidBCELoss(from_sigmoid=True)
    loss2 = gluon.loss.SoftmaxCELoss(from_logits=True)
    out1 = mx.random.uniform(0.1, 0.9, shape=(N, 1))
    out2 = mx.nd.log(mx.nd.concat(1-out1, out1, dim=1) + 1e-8)
    label = mx.nd.round(mx.random.uniform(0, 1, shape=(N, 1)))
    assert_almost_equal(loss1(out1, label).asnumpy(), loss2(out2, label).asnumpy())
Example 13: pull_before_push
def pull_before_push(kv):
    for i in range(nrepeat):
        for j in range(len(keys)):
            out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
            kv.pull(keys[j], out=out)
            exp = np.zeros_like(out[0].asnumpy())
            for o in out:
                assert_almost_equal(o.asnumpy(), exp)
Example 14: softmax_forward
def softmax_forward(input_data, true_output):
    data = mx.sym.Variable('data')
    out1 = data.softmax(axis=1)
    exec1 = out1.bind(mx.cpu(), args={'data': input_data})
    exec1.forward()[0].wait_to_read()
    ndarr = exec1.outputs[0][0][0][0]
    nparr = ndarr.asnumpy()
    assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
Example 15: test_logistic_loss_equal_bce
def test_logistic_loss_equal_bce():
    N = 100
    loss_binary = gluon.loss.LogisticLoss(label_format='binary')
    loss_signed = gluon.loss.LogisticLoss(label_format='signed')
    loss_bce = gluon.loss.SigmoidBCELoss(from_sigmoid=False)
    data = mx.random.uniform(-10, 10, shape=(N, 1))
    label = mx.nd.round(mx.random.uniform(0, 1, shape=(N, 1)))
    assert_almost_equal(loss_binary(data, label).asnumpy(), loss_bce(data, label).asnumpy())
    assert_almost_equal(loss_signed(data, 2 * label - 1).asnumpy(), loss_bce(data, label).asnumpy())