This article collects typical usage examples of the Python function tvm.contrib.graph_runtime.create. If you are wondering exactly what create does, how to call it, or what real-world usage looks like, the curated examples here should help.
Fifteen code examples of the create function are shown below, drawn from open-source projects and sorted by popularity by default.
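Before diving into the project code, here is a minimal, self-contained sketch of the pattern every example below follows: compile a model, wire the result into a runtime module with graph_runtime.create, feed inputs, run, and read outputs back. It assumes the legacy TVM 0.x Relay API that these examples were written against; the variable names are illustrative, not taken from any project.

import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime

# A trivial one-operator model: y = x + 1.
x = relay.var("x", shape=(2, 3), dtype="float32")
func = relay.Function([x], relay.add(x, relay.const(1.0)))

# relay.build returns the execution graph, the compiled kernels,
# and the parameter dict (empty here).
graph, lib, params = relay.build(func, target="llvm")

# graph_runtime.create binds the graph and kernels to a device context.
ctx = tvm.cpu(0)
m = graph_runtime.create(graph, lib, ctx)
m.set_input("x", tvm.nd.array(np.ones((2, 3), dtype="float32")))
m.set_input(**params)
m.run()
print(m.get_output(0).asnumpy())  # every entry is 2.0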
Example 1: helper
def helper(symbol, inputs, dtype,
           np_forward, np_backward=None):
    ishapes = {}
    input_syms = []
    np_inputs = {}
    for (k, v) in inputs.items():
        ishapes.update({k: v[0]})
        np_inputs.update({k: np.random.uniform(size=v[0]).astype(dtype)})
        if len(v) > 1:
            input_syms.append(v[1])

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(symbol, target, ishapes)
        m = graph_runtime.create(graph, lib, ctx)
        m.run(**np_inputs)
        y_np = np_forward(**np_inputs)
        out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
        np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
        # backward
        if np_backward:
            graph._set_symbol_list_attr("grad_ys", symbol)
            for x in input_syms:
                graph._set_symbol_list_attr("grad_xs", x)
            graph._set_symbol_list_attr("grad_ys_out_grad", sym.Variable("head_grads"))
            graph = graph.apply("Gradient")
            ishapes.update({"head_grads": y_np.shape})
            graph, lib, _ = nnvm.compiler.build(graph, target, ishapes)
            m = graph_runtime.create(graph, lib, ctx)
            head_grads = np.random.uniform(size=y_np.shape).astype(dtype)
            y_np = head_grads * np_backward(**np_inputs)
            m.run(head_grads=head_grads, **np_inputs)
            out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
            np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
Example 2: test_concatenate_conv2d
def test_concatenate_conv2d():
    ch = 3
    size = 8
    data = sym.Variable(name="data")
    concat = sym.concatenate(data, data, axis=1)
    conv = sym.conv2d(data=concat, kernel_size=(1,1), channels=ch*2, use_bias=False, name="conv")
    net = sym.elemwise_add(concat, conv)

    dtype = "float32"
    dshape = (1, ch, size, size)
    kshape = (ch*2, ch*2, 1, 1)
    oshape = (1, ch*2, size, size)
    shape_dict = {"data": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(net, target, shape_dict)
        # data, conv weight, conv op, concat
        assert graph.index.num_nodes == 4
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=data, conv_weight=kernel)
        # get output
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        concat = np.concatenate((data.asnumpy(), data.asnumpy()), axis=1)
        conv = topi.testing.conv2d_nchw_python(
            concat, kernel.asnumpy(), (1,1), 'SAME')
        ref = concat + conv
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example 3: tune_and_evaluate
def tune_and_evaluate(tuning_opt):
    # extract workloads from nnvm graph
    print("Extract tasks...")
    net, params, data_shape, out_shape = get_network(model_name, batch_size)
    tasks = autotvm.task.extract_from_graph(net, target=target,
                                            shape={'data': data_shape}, dtype=dtype,
                                            symbols=(nnvm.sym.conv2d,))

    # run tuning tasks
    print("Tuning...")
    tune_kernels(tasks, **tuning_opt)

    # compile kernels with history best records
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(
                net, target=target, shape={'data': data_shape}, params=params, dtype=dtype)

        # upload parameters to device
        # ("runtime" is tvm.contrib.graph_runtime imported under an alias)
        ctx = tvm.cpu()
        data_tvm = tvm.nd.array((np.random.uniform(size=data_shape)).astype(dtype))
        module = runtime.create(graph, lib, ctx)
        module.set_input('data', data_tvm)
        module.set_input(**params)

        # evaluate
        print("Evaluate inference time cost...")
        ftimer = module.module.time_evaluator("run", ctx, number=100, repeat=3)
        prof_res = np.array(ftimer().results) * 1000  # convert to milliseconds
        print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
              (np.mean(prof_res), np.std(prof_res)))
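Example 3 creates the module straight from the in-memory build results. For deployment you would normally persist the three artifacts first and recreate the module later; below is a sketch following the old NNVM deploy tutorials (the file names are placeholders, not taken from the example above). Note that graph_runtime.create also accepts the graph as a JSON string.

# Persist what nnvm.compiler.build returned.
lib.export_library("deploy_lib.tar")
with open("deploy_graph.json", "w") as fo:
    fo.write(graph.json())
with open("deploy_param.params", "wb") as fo:
    fo.write(nnvm.compiler.save_param_dict(params))

# Later, possibly on another machine: reload and recreate the runtime.
loaded_json = open("deploy_graph.json").read()
loaded_lib = tvm.module.load("deploy_lib.tar")
module = graph_runtime.create(loaded_json, loaded_lib, tvm.cpu())
module.load_params(bytearray(open("deploy_param.params", "rb").read()))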
Example 4: test_forward_where
def test_forward_where():
    cond = mx.sym.var('cond')
    x = mx.sym.var('x')
    y = mx.sym.var('y')
    dshape = (2, 2)
    dtype = 'float32'
    mx_sym = mx.sym.where(cond, x, y)
    np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
    np_x = np.random.uniform(size=dshape).astype(dtype)
    np_y = np.random.uniform(size=dshape).astype(dtype)
    mx_cond = mx.nd.array(np_cond)
    mx_x = mx.nd.array(np_x)
    mx_y = mx.nd.array(np_y)
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
    mod.bind(data_shapes=[('cond', dshape), ('x', dshape), ('y', dshape)], for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()
    out_shape = dshape

    new_sym, params = frontend.from_mxnet(mx_sym, args, auxs)
    shape_dict = {'cond': dshape, 'x': dshape, 'y': dshape}
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("cond", tvm.nd.array(np_cond))
        m.set_input("x", tvm.nd.array(np_x))
        m.set_input("y", tvm.nd.array(np_y))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example 5: test_injective_conv2d
def test_injective_conv2d():
    channels = 16
    data = sym.Variable(name="data")
    pool = sym.global_avg_pool2d(data=data)
    weight = sym.reshape(pool, shape=[1, channels, 1, 1])
    residual = sym.conv2d(data=data, kernel_size=(3,3), channels=channels, padding=(1, 1),
                          layout="NCHW", kernel_layout="OIHW", use_bias=False, name="conv")
    net = weight * data + residual

    size = 56
    dtype = "float32"
    dshape = (1, channels, size, size)
    kshape = (channels, channels, 3, 3)
    oshape = dshape
    shape_dict = {"data": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(net, target, shape_dict)
        # data, global_avg_pool, conv weight, conv op, fused elemwise add
        assert graph.index.num_nodes == 5
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=data, conv_weight=kernel)
        # get output
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        residual = topi.testing.conv2d_nchw_python(
            data.asnumpy(), kernel.asnumpy(), (1,1), 'SAME')
        weight = np.mean(data.asnumpy(), axis=(2, 3))
        c_np = weight[:, :, np.newaxis, np.newaxis] * data.asnumpy() + residual
        tvm.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example 6: test_forward_minimum
def test_forward_minimum():
    a = mx.sym.var('a')
    b = mx.sym.var('b')
    dshape = (10, 20)
    dtype = 'float32'
    mx_sym = mx.sym._internal._minimum(a, b)
    np_a = np.random.uniform(size=dshape).astype(dtype)
    np_b = np.random.uniform(size=dshape).astype(dtype)
    mx_a = mx.nd.array(np_a)
    mx_b = mx.nd.array(np_b)
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['a', 'b'])
    mod.bind(data_shapes=[('a', dshape), ('b', dshape)], for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd._internal._minimum(mx_a, mx_b).asnumpy()
    out_shape = dshape

    new_sym, params = frontend.from_mxnet(mx_sym, args, auxs)
    shape_dict = {'a': dshape, 'b': dshape}
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("a", tvm.nd.array(np_a))
        m.set_input("b", tvm.nd.array(np_b))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example 7: _impl_v1
def _impl_v1(cls, inputs, attr, params):
    if 'shape' in attr:
        return _op.reshape(inputs[0], attr['shape'])

    if get_name(inputs[1]) in params:
        shape = tuple(params[inputs[1].name_hint].asnumpy())
        out = _op.reshape(inputs[0], shape)
    else:
        # Try to infer the shape by precompute prune if possible.
        # TODO: check that the inputs are in params;
        # to be improved once Relay supports NNVM's list_input_names API.
        logging.warning("Inferring Reshape argument by precompute")
        func = _expr.Function(ir_pass.free_vars(inputs[1]), inputs[1])
        with tvm.relay.build_config(opt_level=0):
            graph, lib, params = tvm.relay.build(func, target="llvm", params=params)
        ctx = tvm.context("llvm", 0)
        from tvm.contrib import graph_runtime
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**params)
        m.run()
        params_new = m.get_output(0)
        inputs.pop(1)
        out = _op.reshape(inputs[0], tuple(params_new.asnumpy().astype('int32').flatten()))
    return out
Example 8: test_conv_ewise_injective
def test_conv_ewise_injective():
    x = sym.Variable("x")
    y = sym.conv2d(x, channels=32, kernel_size=(3, 3), groups=32,
                   name="y", padding=(1,1))
    y = sym.flatten(y + 1) + 1
    dtype = "float32"
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    oshape = (1, 32 * 18 * 18)
    shape_dict = {"x": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        # print(graph.ir(join_entry_attrs=["shape"]))
        assert graph.index.num_nodes == 5
        # set input
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))
        m.run(x=data, y_weight=kernel, y_bias=bias)
        # get output
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        c_np = topi.testing.depthwise_conv2d_python_nchw(
            data.asnumpy(), kernel.asnumpy(), (1,1), 'SAME')
        c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1) + 1
        c_np = c_np.reshape(c_np.shape[0], np.prod(c_np.shape[1:])) + 1
        np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example 9: test_gru_like
def test_gru_like():
    def unit(rnn_dim):
        X = relay.var("X", shape=(1, rnn_dim))
        W = relay.var("y", shape=(3 * rnn_dim, rnn_dim))
        matmul = relay.nn.dense(X, W)
        splitted = relay.split(matmul, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        return relay.Function([X, W], out)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def unit_numpy(X, W):
        prod = np.dot(X, W.transpose())
        splits = np.split(prod, indices_or_sections=3, axis=1)
        return sigmoid(splits[0]) + np.tanh(splits[1]) * np.exp(splits[2])

    dtype = "float32"
    rnn_dim = 1000
    x = np.random.rand(1, rnn_dim).astype(dtype)
    y = np.random.rand(3 * rnn_dim, rnn_dim).astype(dtype) * 0.01 - 0.005
    out_shape = (1, rnn_dim)
    z = unit(rnn_dim)

    for target, ctx in ctx_list():
        with relay.build_config(opt_level=2):
            graph, lib, params = relay.build(z, target)
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input("X", tvm.nd.array(x.astype(dtype)))
        m.set_input("y", tvm.nd.array(y.astype(dtype)))
        m.set_input(**params)
        m.run()
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        ref = unit_numpy(x, y)
        tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
Example 10: test_non_max_suppression
def test_non_max_suppression():
    dshape = (1, 5, 6)
    data = sym.Variable("data")
    valid_count = sym.Variable("valid_count", dtype="int32")
    iou_threshold = 0.7
    force_suppress = True
    top_k = 2
    out = sym.non_max_suppression(data=data, valid_count=valid_count, return_indices=False,
                                  iou_threshold=iou_threshold, force_suppress=force_suppress, top_k=top_k)

    np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],
                         [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],
                         [1, 0.5, 100, 60, 70, 110]]]).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],
                           [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],
                           [-1, -1, -1, -1, -1, -1]]])

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target, {"data": dshape, "valid_count": (dshape[0],)},
                                            dtype={"data": "float32", "valid_count": "int32"})
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**{"data": np_data, "valid_count": np_valid_count})
        m.run()
        tvm_out = m.get_output(0, tvm.nd.empty(np_result.shape, "float32"))
        tvm.testing.assert_allclose(tvm_out.asnumpy(), np_result, atol=1e-5, rtol=1e-5)
Example 11: check_verify
def check_verify():
    # graph, mhost, ctx, params, shape and the tensor_* arrays come
    # from the enclosing test's scope.
    mod = graph_runtime.create(graph, mhost, ctx)
    mod.set_input(**params)
    mod.run()
    out = mod.get_output(0, tvm.nd.empty(shape))
    np.testing.assert_equal(
        out.asnumpy(), tensor_a + tensor_b - tensor_c + tensor_d)
Example 12: run
def run(args):
    onnx_model = onnx.load_model(os.path.join(args.test_dir, 'model.onnx'))
    symbol, params = nnvm.frontend.from_onnx(onnx_model)
    input_names = symbol.list_input_names()
    output_names = symbol.list_output_names()

    test_data_dir = os.path.join(args.test_dir, 'test_data_set_0')
    inputs, outputs = load_test_data(test_data_dir, input_names, output_names)
    inputs = dict(inputs)
    # assert len(input_names) == len(inputs) + len(params)
    # assert len(output_names) == len(outputs)

    graph, lib, params = compile(
        symbol, args.target, input_names, inputs, params,
        args.opt_level, args.autotvm_log)
    if args.dump_nnvm:
        print(graph.ir())
        print(graph.json())

    ctx = tvm.gpu()

    # Prepare inputs.
    tvm_inputs = {}
    for name, value in inputs.items():
        tvm_inputs[name] = tvm.nd.array(value, ctx=ctx)
    for name, value in params.items():
        tvm_inputs[name] = tvm.nd.array(value, ctx=ctx)

    graph_module = None
    if args.debug:
        try:
            graph_module = debug_runtime.create(graph, lib, ctx)
        except Exception:
            print('debug_runtime is disabled. '
                  'Set USE_GRAPH_RUNTIME_DEBUG=ON and rebuild TVM')
    if graph_module is None:
        graph_module = graph_runtime.create(graph, lib, ctx)

    graph_module.set_input(**tvm_inputs)
    graph_module.run()

    for i, (name, expected) in enumerate(outputs):
        tvm_output = tvm.nd.empty(expected.shape, expected.dtype, ctx=ctx)
        actual = graph_module.get_output(i, tvm_output).asnumpy()
        np.testing.assert_allclose(expected, actual,
                                   rtol=1e-3, atol=1e-4, err_msg=name)
        print('%s: OK' % name)
    print('ALL OK')

    if args.iterations > 1:
        num_iterations = args.iterations - 1
        start = time.time()
        for t in range(num_iterations):
            graph_module.run()
        cupy.cuda.device.Device().synchronize()
        elapsed = time.time() - start
        print('Elapsed: %.3f msec' % (elapsed * 1000 / num_iterations))
Example 13: test_multibox_transform_loc
def test_multibox_transform_loc():
    batch_size = 1
    num_anchors = 3
    num_classes = 3
    cls_prob = sym.Variable("cls_prob")
    loc_preds = sym.Variable("loc_preds")
    anchors = sym.Variable("anchors")
    transform_loc_data, valid_count = sym.multibox_transform_loc(cls_prob=cls_prob, loc_pred=loc_preds,
                                                                 anchor=anchors)
    out = sym.non_max_suppression(data=transform_loc_data, valid_count=valid_count, return_indices=False)

    # Manually create test case
    np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]])
    np_loc_preds = np.array([[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]])
    np_anchors = np.array([[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]])

    expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                                 [0, 0.44999999, 1, 1, 1, 1],
                                 [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])

    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target, {"cls_prob": (batch_size, num_anchors, num_classes),
                                                          "loc_preds": (batch_size, num_anchors * 4),
                                                          "anchors": (1, num_anchors, 4)})
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**{"cls_prob": np_cls_prob.astype(dtype), "loc_preds": np_loc_preds.astype(dtype),
                       "anchors": np_anchors.astype(dtype)})
        m.run()
        tvm_out = m.get_output(0, tvm.nd.empty(expected_np_out.shape, dtype))
        tvm.testing.assert_allclose(tvm_out.asnumpy(), expected_np_out, atol=1e-5, rtol=1e-5)
Example 14: test_nms
def test_nms():
    dshape = (1, 5, 6)
    data = sym.Variable("data")
    valid_count = sym.Variable("valid_count", dtype="int32")
    nms_threshold = 0.7
    force_suppress = True
    nms_topk = 2
    out = sym.nms(data=data, valid_count=valid_count, nms_threshold=nms_threshold,
                  force_suppress=force_suppress, nms_topk=nms_topk)

    np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],
                         [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],
                         [1, 0.5, 100, 60, 70, 110]]]).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],
                           [0, 0.4, 4, 21, 19, 40], [-1, 0.9, 35, 61, 52, 79],
                           [-1, -1, -1, -1, -1, -1]]])

    target = "llvm"
    ctx = tvm.cpu()
    graph, lib, _ = nnvm.compiler.build(out, target, {"data": dshape, "valid_count": (dshape[0],)},
                                        dtype={"data": "float32", "valid_count": "int32"})
    m = graph_runtime.create(graph, lib, ctx)
    m.set_input(**{"data": np_data, "valid_count": np_valid_count})
    m.run()
    out = m.get_output(0, tvm.nd.empty(np_result.shape, "float32"))
    tvm.testing.assert_allclose(out.asnumpy(), np_result, atol=1e-5, rtol=1e-5)
Example 15: test_mixed_precision
def test_mixed_precision():
    x = sym.Variable("x")
    dtype = "int8"
    out_dtype = "int32"
    y = sym.conv2d(x,
                   channels=10,
                   kernel_size=(3,3),
                   name="y",
                   padding=(1,1),
                   use_bias=False,
                   out_dtype="int32")
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    oshape = (1, 10, 18, 18)
    shape_dict = {"x": dshape}
    dtype_dict = {"x": dtype}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict, dtype_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(-127, 127, size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(-127, 127, size=kshape).astype(dtype))
        m.run(x=data, y_weight=kernel)
        out = m.get_output(0, tvm.nd.empty(oshape, out_dtype))
        c_np = topi.testing.conv2d_nchw_python(
            data.asnumpy().astype(out_dtype),
            kernel.asnumpy().astype(out_dtype), 1, 1)
        tvm.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
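A closing note on the two input-feeding styles seen throughout these examples: m.run(**inputs) is shorthand that sets each keyword argument as an input before running, so the two forms below are equivalent (tensor names taken from Example 15):

# Style 1: keyword arguments to run().
m.run(x=data, y_weight=kernel)

# Style 2: explicit set_input calls, then a bare run().
m.set_input("x", data)
m.set_input("y_weight", kernel)
m.run()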