This page collects typical usage examples of the Python function nnvm.testing.config.ctx_list. If you are unsure what ctx_list does or how to use it, the curated examples below may help.
Fifteen code examples of the ctx_list function are shown below, sorted by popularity by default.
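All fifteen examples share one pattern: ctx_list() enumerates (target, ctx) pairs, such as ("llvm", tvm.cpu(0)) or ("cuda", tvm.gpu(0)), one for each backend enabled in the local TVM build, and each numerical check is repeated once per pair. Below is a minimal sketch of that pattern; the toy add graph and shapes are invented for illustration and do not come from any example on this page:

import numpy as np
import nnvm.compiler
import nnvm.symbol as sym
import tvm
from nnvm.testing.config import ctx_list
from tvm.contrib import graph_runtime

x = sym.Variable("x")
y = sym.elemwise_add(x, x)  # toy graph: y = x + x
dshape = (1, 4)
x_np = np.random.uniform(size=dshape).astype("float32")
for target, ctx in ctx_list():
    # compile the graph for this backend, run it, compare against numpy
    graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
    m = graph_runtime.create(graph, lib, ctx)
    m.run(x=tvm.nd.array(x_np))
    out = m.get_output(0, tvm.nd.empty(dshape, "float32"))
    tvm.testing.assert_allclose(out.asnumpy(), x_np + x_np, rtol=1e-5)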
Example 1: verify_mxnet_frontend_impl
def verify_mxnet_frontend_impl(mx_symbol, data_shape=(1, 3, 224, 224), out_shape=(1, 1000),
                               gluon_impl=False, name=None, dtype='float32'):
    """Use a name different from test_* so that nose does not pick it up."""
    if gluon_impl:
        def get_gluon_output(name, x):
            net = vision.get_model(name)
            net.collect_params().initialize(mx.init.Xavier())
            net_sym = gluon.nn.SymbolBlock(outputs=net(mx.sym.var('data')),
                                           inputs=mx.sym.var('data'),
                                           params=net.collect_params())
            out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
            return out, net_sym
    else:
        def get_mxnet_output(symbol, x, dtype='float32'):
            from collections import namedtuple
            Batch = namedtuple('Batch', ['data'])
            mod = mx.mod.Module(symbol, label_names=None)
            mod.bind(data_shapes=[('data', x.shape)], for_training=False)
            mod.init_params()
            mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
            out = mod.get_outputs()[0].asnumpy()
            args, auxs = mod.get_params()
            return out, args, auxs

    def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'):
        if gluon_impl:
            new_sym, params = frontend.from_mxnet(symbol)
        else:
            new_sym, params = frontend.from_mxnet(symbol, args, auxs)
        dshape = x.shape
        shape_dict = {'data': dshape}
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("data", tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        # get outputs
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
        return out.asnumpy()

    # random input
    x = np.random.uniform(size=data_shape)
    if gluon_impl:
        gluon_out, gluon_sym = get_gluon_output(name, x)
        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(gluon_sym, x, None, None, target, ctx, dtype)
            tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
    else:
        mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
        assert "data" not in args
        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, ctx, dtype)
            tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example 2: test_multibox_transform_loc
def test_multibox_transform_loc():
    batch_size = 1
    num_anchors = 3
    num_classes = 3
    cls_prob = sym.Variable("cls_prob")
    loc_preds = sym.Variable("loc_preds")
    anchors = sym.Variable("anchors")
    transform_loc_data, valid_count = sym.multibox_transform_loc(cls_prob=cls_prob,
                                                                 loc_pred=loc_preds,
                                                                 anchor=anchors)
    out = sym.non_max_suppression(data=transform_loc_data, valid_count=valid_count,
                                  return_indices=False)

    # Manually create test case
    np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]])
    np_loc_preds = np.array([[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]])
    np_anchors = np.array([[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]])

    expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                                 [0, 0.44999999, 1, 1, 1, 1],
                                 [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])

    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target,
                                            {"cls_prob": (batch_size, num_anchors, num_classes),
                                             "loc_preds": (batch_size, num_anchors * 4),
                                             "anchors": (1, num_anchors, 4)})
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**{"cls_prob": np_cls_prob.astype(dtype),
                       "loc_preds": np_loc_preds.astype(dtype),
                       "anchors": np_anchors.astype(dtype)})
        m.run()
        tvm_out = m.get_output(0, tvm.nd.empty(expected_np_out.shape, dtype))
        tvm.testing.assert_allclose(tvm_out.asnumpy(), expected_np_out, atol=1e-5, rtol=1e-5)
Example 3: verify_lrn
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
    in_array = np.random.uniform(size=shape).astype(dtype)

    if alpha is None and beta is None and bias is None:
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], size=nsize)
    else:
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha,
                                     beta=beta, bias=bias, size=nsize)

    graph = helper.make_graph([node],
                              "lrn_test",
                              inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))])
    model = helper.make_model(graph, producer_name='lrn_test')

    def _get_python_lrn():
        # numpy reference implementation of LRN
        square_sum = np.zeros(shape).astype(dtype)
        for n, c, h, w in np.ndindex(in_array.shape):
            # clamp the summation window to the channel dimension
            square_sum[n, c, h, w] = sum(in_array[n,
                                         max(0, c - int(math.floor((nsize - 1) / 2))):
                                         min(shape[1], c + int(math.ceil((nsize - 1) / 2)) + 1),
                                         h,
                                         w] ** 2)
        py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
        return py_out

    for target, ctx in ctx_list():
        input_name = model.graph.input[0].name
        py_out = _get_python_lrn()
        tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, 'float32')
        tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
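Note: this example and the other ONNX-based ones call a get_tvm_output helper that is not reproduced on this page. For orientation, a minimal single-input, single-output version could look like the sketch below; the actual helper in the TVM test suite also handles multiple inputs and outputs (needed, e.g., by the Split example), so treat this as an illustrative assumption, not the real implementation:

def get_tvm_output(model, input_data, target, ctx, output_shape, output_dtype='float32'):
    # sketch: import the ONNX model into nnvm, build for the given target, run once
    new_sym, params = nnvm.frontend.from_onnx(model)
    input_name = model.graph.input[0].name
    shape_dict = {input_name: input_data.shape}
    graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
    m = graph_runtime.create(graph, lib, ctx)
    m.set_input(input_name, tvm.nd.array(input_data))
    m.set_input(**params)
    m.run()
    return m.get_output(0, tvm.nd.empty(output_shape, output_dtype)).asnumpy()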
Example 4: test_shape
def test_shape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['ref_in'],
                                     value=onnx.helper.make_tensor(name='const_tensor',
                                                                   data_type=onnx.TensorProto.INT32,
                                                                   dims=ref_array.shape,
                                                                   vals=ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
    shape_node = helper.make_node("Shape", ['out'], ['final_out'])

    graph = helper.make_graph([ref_node, reshape_node, shape_node],
                              "shape_test",
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape))],
                              outputs=[helper.make_tensor_value_info("final_out",
                                                                     TensorProto.FLOAT, list(ref_shape))])
    model = helper.make_model(graph, producer_name='shape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('int32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')
        tvm.testing.assert_allclose(ref_shape, tvm_out)
Example 5: test_non_max_suppression
def test_non_max_suppression():
    dshape = (1, 5, 6)
    data = sym.Variable("data")
    valid_count = sym.Variable("valid_count", dtype="int32")
    iou_threshold = 0.7
    force_suppress = True
    top_k = 2
    out = sym.non_max_suppression(data=data, valid_count=valid_count, return_indices=False,
                                  iou_threshold=iou_threshold, force_suppress=force_suppress,
                                  top_k=top_k)

    np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],
                         [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],
                         [1, 0.5, 100, 60, 70, 110]]]).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],
                           [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],
                           [-1, -1, -1, -1, -1, -1]]])

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target,
                                            {"data": dshape, "valid_count": (dshape[0],)},
                                            dtype={"data": "float32", "valid_count": "int32"})
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**{"data": np_data, "valid_count": np_valid_count})
        m.run()
        tvm_out = m.get_output(0, tvm.nd.empty(np_result.shape, "float32"))
        tvm.testing.assert_allclose(tvm_out.asnumpy(), np_result, atol=1e-5, rtol=1e-5)
Example 6: verify_caffe2_forward_impl
def verify_caffe2_forward_impl(model, data_shape, out_shape):
    dtype = 'float32'
    data = np.random.uniform(size=data_shape).astype(dtype)
    c2_out = get_caffe2_output(model, data, dtype)
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, data, target, ctx, out_shape, dtype)
        tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
Example 7: test_forward_where
def test_forward_where():
    cond = mx.sym.var('cond')
    x = mx.sym.var('x')
    y = mx.sym.var('y')
    dshape = (2, 2)
    dtype = 'float32'
    mx_sym = mx.sym.where(cond, x, y)
    np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
    np_x = np.random.uniform(size=dshape).astype(dtype)
    np_y = np.random.uniform(size=dshape).astype(dtype)
    mx_cond = mx.nd.array(np_cond)
    mx_x = mx.nd.array(np_x)
    mx_y = mx.nd.array(np_y)
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
    mod.bind(data_shapes=[('cond', dshape), ('x', dshape), ('y', dshape)], for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()

    out_shape = dshape
    new_sym, params = frontend.from_mxnet(mx_sym, args, auxs)
    shape_dict = {'cond': dshape, 'x': dshape, 'y': dshape}
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("cond", tvm.nd.array(np_cond))
        m.set_input("x", tvm.nd.array(np_x))
        m.set_input("y", tvm.nd.array(np_y))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example 8: test_mixed_precision
def test_mixed_precision():
    x = sym.Variable("x")
    dtype = "int8"
    out_dtype = "int32"
    y = sym.conv2d(x,
                   channels=10,
                   kernel_size=(3, 3),
                   name="y",
                   padding=(1, 1),
                   use_bias=False,
                   out_dtype="int32")
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    oshape = (1, 10, 18, 18)
    shape_dict = {"x": dshape}
    dtype_dict = {"x": dtype}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict, dtype_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(-127, 127, size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(-127, 127, size=kshape).astype(dtype))
        m.run(x=data, y_weight=kernel)
        out = m.get_output(0, tvm.nd.empty(oshape, out_dtype))
        c_np = topi.testing.conv2d_nchw_python(
            data.asnumpy().astype(out_dtype),
            kernel.asnumpy().astype(out_dtype), 1, 1)
        tvm.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example 9: run_model_checkonly
def run_model_checkonly(model_file, model_name=''):
    model = cm.models.MLModel(model_file)
    sym, params = nnvm.frontend.from_coreml(model)
    x = model_zoo.get_cat_image()
    for target, ctx in ctx_list():
        tvm_output = get_tvm_output(sym, x, params, target, ctx)
        print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
Example 10: helper
def helper(symbol, inputs, dtype,
           np_forward, np_backward=None):
    ishapes = {}
    input_syms = []
    np_inputs = {}
    for (k, v) in inputs.items():
        ishapes.update({k: v[0]})
        np_inputs.update({k: np.random.uniform(size=v[0]).astype(dtype)})
        if len(v) > 1:
            input_syms.append(v[1])

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(symbol, target, ishapes)
        m = graph_runtime.create(graph, lib, ctx)
        m.run(**np_inputs)
        y_np = np_forward(**np_inputs)
        out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
        np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
        # backward
        if np_backward:
            graph._set_symbol_list_attr("grad_ys", symbol)
            for x in input_syms:
                graph._set_symbol_list_attr("grad_xs", x)
            graph._set_symbol_list_attr("grad_ys_out_grad", sym.Variable("head_grads"))
            graph = graph.apply("Gradient")
            ishapes.update({"head_grads": y_np.shape})
            graph, lib, _ = nnvm.compiler.build(graph, target, ishapes)
            m = graph_runtime.create(graph, lib, ctx)
            head_grads = np.random.uniform(size=y_np.shape).astype(dtype)
            y_np = head_grads * np_backward(**np_inputs)
            m.run(head_grads=head_grads, **np_inputs)
            out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
            np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
Example 11: test_reshape_like
def test_reshape_like():
    in_shape = (4, 3, 3, 4)
    ref_shape = (3, 4, 4, 3)

    ref_array = np.random.uniform(size=ref_shape).astype('float32')
    ref_node = onnx.helper.make_node('Constant',
                                     inputs=[],
                                     outputs=['ref_in'],
                                     value=onnx.helper.make_tensor(name='const_tensor',
                                                                   data_type=onnx.TensorProto.FLOAT,
                                                                   dims=ref_array.shape,
                                                                   vals=ref_array.flatten().astype(float)))
    copy_node = helper.make_node("Identity", ["ref_in"], ["copy_in"])
    reshape_node = helper.make_node("Reshape", ["in", "copy_in"], ["out"])

    graph = helper.make_graph([ref_node, copy_node, reshape_node],
                              "reshape_like_test",
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                                                     TensorProto.FLOAT, list(ref_shape))])
    model = helper.make_model(graph, producer_name='reshape_like_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
        tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
Example 12: verify_min
def verify_min(input_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.min((a_np1, a_np2, a_np3), axis=0)

    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim)),
              ('input3', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Min',
                            input_names=['input1', 'input2', 'input3'],
                            output_name='output',
                            mode='MIN')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model,
                            [a_np1, a_np2, a_np3],
                            ['input1', 'input2', 'input3'],
                            b_np.shape,
                            dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Example 13: verify_split
def verify_split(indata, outdatas, split, axis=0):
    indata = np.array(indata).astype(np.float32)
    outdatas = [np.array(o).astype(np.float32) for o in outdatas]
    node = helper.make_node(
        'Split',
        inputs=['input'],
        outputs=['output_{}'.format(i) for i in range(len(split))],
        axis=axis,
        split=split
    )
    graph = helper.make_graph([node],
                              'split_test',
                              inputs=[helper.make_tensor_value_info("input",
                                                                    TensorProto.FLOAT, list(indata.shape))],
                              outputs=[helper.make_tensor_value_info("output_{}".format(i),
                                                                     TensorProto.FLOAT, list(outdatas[i].shape))
                                       for i in range(len(split))])
    model = helper.make_model(graph, producer_name='split_test')

    for target, ctx in ctx_list():
        output_shape = [o.shape for o in outdatas]
        output_type = ['float32'] * len(outdatas)  # one dtype per output
        tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type)
        for o, t in zip(outdatas, tvm_out):
            tvm.testing.assert_allclose(o, t)
Example 14: test_update
def test_update():
    w = sym.Variable("w")
    w2 = sym.Variable("w2")
    w = sym._assign(w, w + 1)
    w2 = sym._assign(w2, w + 1)

    dshape = (5, 3, 18, 18)
    shape_dict = {"w": dshape, "w2": dshape}
    dtype = "float32"

    def check(target, ctx):
        graph, lib, _ = nnvm.compiler.build(w2, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.set_input("w", data)
        m.run()
        out = m.get_input("w2", tvm.nd.empty(dshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(), data.asnumpy() + 2, rtol=1e-5)
        m.run()
        out = m.get_input("w2", tvm.nd.empty(dshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(), data.asnumpy() + 3, rtol=1e-5)

    for target, ctx in ctx_list():
        check(target, ctx)
Example 15: verify_reduce_x
def verify_reduce_x(name, indata, axis, keepdims):
    indata = np.array(indata).astype(np.float32)
    # numpy expected result
    if name == 'ReduceMax':
        outdata = np.maximum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMin':
        outdata = np.minimum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceSum':
        outdata = np.sum(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMean':
        outdata = np.mean(indata, axis=axis, keepdims=keepdims == 1)
    else:
        raise Exception('unsupported op: {}'.format(name))
    if len(np.asarray(outdata).shape) == 0:
        outdata = np.asarray([outdata])
    # onnx graph
    if axis is None:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                keepdims=keepdims)
    else:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                axis=axis, keepdims=keepdims)
    graph = helper.make_graph([node],
                              '{}_test'.format(name),
                              inputs=[helper.make_tensor_value_info("input",
                                                                    TensorProto.FLOAT, list(indata.shape))],
                              outputs=[helper.make_tensor_value_info("output",
                                                                     TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='{}_test'.format(name))
    # tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)