This article collects typical usage examples of the Python function tvm.relay.create_executor. If you have been wondering what create_executor does and how to use it, the curated code examples here may help.
Below are 15 code examples of create_executor, sorted by popularity by default.
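All of the examples share one basic pattern: build a Relay expression, wrap it in a relay.Function, create an executor for a backend kind ("graph", "debug", or "vm"), and run it on NumPy inputs. The following is a minimal sketch of that pattern; it assumes the same pre-0.7 TVM API used throughout this page (create_executor still takes ctx= and results expose asnumpy()) and is an illustration, not one of the collected examples:

import numpy as np
import tvm
from tvm import relay

# Build a trivial Relay function: f(x) = x + 1.
x = relay.var("x", relay.TensorType((2, 2), "float32"))
func = relay.Function([x], relay.add(x, relay.const(1.0)))

# "graph" compiles and runs on the graph runtime; "debug" uses the
# slower reference interpreter. Both expose the same evaluate() API.
intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
res = intrp.evaluate(func)(np.ones((2, 2), dtype="float32"))
print(res.asnumpy())  # [[2. 2.] [2. 2.]]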
Example 1: verify_infer_type_prelu
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
    x = relay.var("data", relay.TensorType(data, dtype))
    if alpha:
        y = relay.var("alpha", relay.TensorType(alpha, dtype))
    else:
        y = relay.var("alpha", relay.IncompleteType())
    z = relay.nn.prelu(x, y, axis=axis)
    zz = relay.ir_pass.infer_type(z)
    if axis != 1:
        assert "axis" in z.astext()
    assert zz.checked_type == relay.ty.TensorType(output, dtype)
    if not alpha:
        axis = axis if axis else 1
        alpha_shape = (data[axis],)
        assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
    if all(isinstance(v, tvm.expr.Var) for v in data) or not alpha:
        return
    func = relay.Function([x, y], z)
    x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
    a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
    if axis == 1:
        ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data
    else:
        ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, a_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data, a_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
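This helper is parameterized over shapes, and the page does not show a call site. A hypothetical invocation (shapes chosen for illustration, using the per-channel alpha of length 3 that the reference computation above assumes) might be:

verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))   # channel-first, axis=1
verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))   # channel-last, axis=3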
Example 2: test_flatten_infer_type
def test_flatten_infer_type():
    d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
    x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")

    x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 24), "float32")

    x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")

    shape = (1, 5, 10, 10)
    o_shape = (1, 500)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(o_shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = x_data.flatten().reshape(o_shape)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example 3: test_pass_run
def test_pass_run():
    function_pass = transform
    assert pass_name in function_pass.astext()

    updated_mod = function_pass(mod)
    assert isinstance(updated_mod, relay.Module)

    # Check the log function in the updated module.
    new_v_log = updated_mod.get_global_var(v_log.name_hint)
    new_log = updated_mod[new_v_log]
    check_func(new_log, get_ref_log())

    # Check the log function in the Python-transformed function.
    ret = opt_tester.transform(log, pass_ctx)
    check_func(new_log, ret)

    # Execute the log function.
    x_nd = get_rand(shape, dtype)
    ref_res = np.log(x_nd.asnumpy() * 2)
    for target, ctx in ctx_list():
        exe1 = relay.create_executor("graph", ctx=ctx, target=target)
        exe2 = relay.create_executor("debug", ctx=ctx, target=target)
        res1 = exe1.evaluate(new_log)(x_nd)
        tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
        res2 = exe2.evaluate(new_log)(x_nd)
        tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)
Example 4: test_infer_type_leaky_relu
def test_infer_type_leaky_relu():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.leaky_relu(x, alpha=0.1)
    assert "alpha=0.1" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.leaky_relu(x, alpha=0.1)
    assert "alpha=0.1" in z.astext()
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example 5: verify_roi_pool
def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):
    data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
    rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
    z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size),
                              spatial_scale=spatial_scale, layout="NCHW")
    zz = relay.ir_pass.infer_type(z)
    batch, channel, in_size, _ = data_shape
    num_roi = rois_shape[0]
    assert zz.checked_type == relay.ty.TensorType(
        (num_roi, channel, pooled_size, pooled_size), "float32")

    func = relay.Function([data, rois], z)
    func = relay.ir_pass.infer_type(func)
    np_data = np.random.uniform(size=data_shape).astype("float32")
    np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
    np_rois[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype('float32')
    ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,
                                                spatial_scale=spatial_scale)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(np_data, np_rois)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res2 = intrp2.evaluate(func)(np_data, np_rois)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)
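No call site is shown on the page; a hypothetical invocation (shapes for illustration only) might be:

verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)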
Example 6: test_lrn
def test_lrn():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
    assert "alpha=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    size = 5
    axis = 1
    bias = 0.5
    alpha = .00001
    beta = 0.75
    z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example 7: test_l2_normalize
def test_l2_normalize():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
    assert "axis=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    eps = 0.001
    axis = 1
    z = relay.nn.l2_normalize(x, eps=eps, axis=[axis])
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example 8: verify_multibox_prior
def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,),
                          ratios=(1.0,), steps=(-1.0, -1.0),
                          offsets=(0.5, 0.5), clip=True, check_size=False,
                          check_type_only=False):
    z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)
    zz = relay.ir_pass.infer_type(z)
    if check_size:
        assert "sizes=" in z.astext()
    assert zz.checked_type == relay.TensorType(
        (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4),
        "float32")

    if check_type_only:
        return

    data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
    func = relay.Function([x], z)
    func = relay.ir_pass.infer_type(func)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res2 = intrp2.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
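Because ref_res is only consumed by the numeric check, a hypothetical type-only invocation (not from the original page) can pass None for it:

x = relay.var("x", relay.TensorType((1, 3, 56, 56), "float32"))
verify_multibox_prior(x, (1, 3, 56, 56), None, check_type_only=True)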
Example 9: test_forward_scalar_ops
def test_forward_scalar_ops():
    for op in [operator.add, operator.sub, operator.mul, operator.truediv,
               operator.pow, operator.lt, operator.le, operator.eq,
               operator.ne, operator.gt, operator.ge]:
        dtype = 'float32'
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = op(mx.sym.var('a'), b_scalar)
        ref_res = op(mx.nd.array(a_np), b_scalar)
        shapes = {'a': a_shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())

    for op in ["maximum", "minimum"]:
        dtype = 'float32'
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), b_scalar])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
        shapes = {'a': a_shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Example 10: test_default_value
def test_default_value():
    num_anchors = 3
    num_classes = 3
    np_cls_prob = np.array(
        [[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45],
          [0.7, 0.1, 0.2]]]).astype("float32")
    np_loc_preds = np.array(
        [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4,
          -0.8]]).astype("float32")
    np_anchors = np.array(
        [[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2],
          [1.2, 1.2, 1.5, 1.5]]]).astype("float32")
    expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                                 [0, 0.44999999, 1, 1, 1, 1],
                                 [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])

    cls_prob = relay.var(
        "cls_prob",
        relay.ty.TensorType((1, num_anchors, num_classes), "float32"))
    loc_pred = relay.var(
        "loc_pred", relay.ty.TensorType((1, num_anchors * 4), "float32"))
    anchors = relay.var(
        "anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
    mtl = relay.vision.multibox_transform_loc(
        cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors)
    ret = relay.ir_pass.infer_type(mtl.astuple())
    ref_type = relay.ty.TupleType(
        tvm.convert([
            relay.ty.TensorType((1, num_anchors, 6), "float32"),
            relay.ty.TensorType((1, ), "int")
        ]))
    assert ret.checked_type == ref_type

    nms = relay.vision.non_max_suppression(mtl[0], mtl[1], return_indices=False)
    func = relay.Function([cls_prob, loc_pred, anchors], nms)
    func = relay.ir_pass.infer_type(func)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(np_cls_prob, np_loc_preds, np_anchors)
        tvm.testing.assert_allclose(op_res1.asnumpy(), expected_np_out, rtol=1e-5)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res2 = intrp2.evaluate(func)(np_cls_prob, np_loc_preds, np_anchors)
        tvm.testing.assert_allclose(op_res2.asnumpy(), expected_np_out, rtol=1e-5)
Example 11: veval
def veval(f, *args, ctx=tvm.cpu()):
    if isinstance(f, relay.Expr):
        ex = relay.create_executor('vm', mod=relay.Module(), ctx=ctx)
        if len(args) == 0:
            return ex.evaluate(f)
        else:
            return ex.evaluate(f)(*args)
    else:
        assert isinstance(f, relay.Module), "expected expression or module"
        mod = f
        ex = relay.create_executor('vm', mod=mod, ctx=ctx)
        if len(args) == 0:
            return ex.evaluate(mod[mod.entry_func])
        else:
            return ex.evaluate(mod[mod.entry_func])(*args)
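A hypothetical use of this helper (not part of the original page) passes a Relay function plus its NumPy arguments and gets the VM result back:

x = relay.var("x", shape=(2,), dtype="float32")
f = relay.Function([x], x * relay.const(2.0, "float32"))
out = veval(f, np.array([1.0, 2.0], dtype="float32"))
print(out.asnumpy())  # [2. 4.]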
Example 12: check_binary_op
def check_binary_op(opfunc, ref):
    # TODO(@jroesch): this piece of code improperly uses type variables.
    n = tvm.var("n")
    s1 = (5, n, 5)
    s2 = (n, 1)
    t1 = relay.TensorType(s1)
    t2 = relay.TensorType(s2)
    x = relay.var("x", t1)
    y = relay.var("y", t2)
    z = opfunc(x, y)
    # test printer
    assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
    assert relay.ir_pass.infer_type(z).checked_type == t1

    if ref is not None:
        t1 = relay.TensorType((5, 10, 5))
        t2 = relay.TensorType((5, 10, 5))
        x = relay.var("x", t1)
        y = relay.var("y", t2)
        z = opfunc(x, y)
        x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
        y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
        ref_res = ref(x_data, y_data)
        func = relay.Function([x, y], z)
        for target, ctx in ctx_list():
            # Use the graph executor by default for testing; we create the
            # function explicitly to avoid constant folding.
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x_data, y_data)
            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
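A hypothetical invocation (not shown on the page) pairs a Relay operator with its NumPy reference:

check_binary_op(relay.add, np.add)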
Example 13: verify_get_valid_counts
def verify_get_valid_counts(dshape, score_threshold):
    dtype = "float32"
    batch_size, num_anchor, elem_length = dshape
    np_data = np.random.uniform(size=dshape).astype(dtype)
    np_out1 = np.zeros(shape=(batch_size,))
    np_out2 = np.zeros(shape=dshape).astype(dtype)
    for i in range(batch_size):
        np_out1[i] = 0
        inter_idx = 0
        for j in range(num_anchor):
            score = np_data[i, j, 1]
            if score >= score_threshold:
                for k in range(elem_length):
                    np_out2[i, inter_idx, k] = np_data[i, j, k]
                np_out1[i] += 1
                inter_idx += 1
            if j >= np_out1[i]:
                for k in range(elem_length):
                    np_out2[i, j, k] = -1

    x = relay.var("x", relay.ty.TensorType(dshape, dtype))
    z = relay.vision.get_valid_counts(x, score_threshold)
    assert "score_threshold" in z.astext()
    func = relay.Function([x], z.astuple())
    func = relay.ir_pass.infer_type(func)
    for target, ctx in ctx_list():
        if target == 'cuda':
            return
        intrp = relay.create_executor("debug", ctx=ctx, target=target)
        out = intrp.evaluate(func)(np_data)
        tvm.testing.assert_allclose(out[0].asnumpy(), np_out1, rtol=1e-3, atol=1e-04)
        tvm.testing.assert_allclose(out[1].asnumpy(), np_out2, rtol=1e-3, atol=1e-04)
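A hypothetical invocation (values for illustration only):

verify_get_valid_counts((1, 2500, 6), 0)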
Example 14: test_forward_where
def test_forward_where():
    cond = mx.sym.var('cond')
    x = mx.sym.var('x')
    y = mx.sym.var('y')
    dshape = (2, 2)
    dtype = 'float32'
    mx_sym = mx.sym.where(cond, x, y)
    np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
    np_x = np.random.uniform(size=dshape).astype(dtype)
    np_y = np.random.uniform(size=dshape).astype(dtype)
    mx_cond = mx.nd.array(np_cond)
    mx_x = mx.nd.array(np_x)
    mx_y = mx.nd.array(np_y)
    shapes = {'cond': dshape, 'x': dshape, 'y': dshape}
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
    mod.bind(data_shapes=shapes.items(), for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()

    new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, args, auxs)
    for target, ctx in ctx_list():
        for kind in ["graph", "debug"]:
            intrp = relay.create_executor(kind, ctx=ctx, target=target)
            op_res = intrp.evaluate(new_sym)(np_cond, np_x, np_y)
            tvm.testing.assert_allclose(op_res.asnumpy(), mx_out)
Example 15: _test_upsampling
def _test_upsampling(layout, method):
    n, c, h, w = tvm.var("n"), 16, 32, 32
    scale = 2
    dtype = "float32"

    def get_shape():
        if layout == "NCHW":
            return (c, h, w), (c, h*scale, w*scale)
        else:
            return (h, w, c), (h*scale, w*scale, c)

    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)

    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    if method == "NEAREST_NEIGHBOR":
        ref = topi.testing.upsampling_python(data, scale, layout)
    else:
        ref = topi.testing.bilinear_resize_python(data, (h*scale, w*scale), layout)
    for target, ctx in ctx_list():
        executor = relay.create_executor("graph", ctx=ctx, target=target)
        out = executor.evaluate(func)(data)
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
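Hypothetical invocations (layout/method pairs chosen to exercise both branches above):

_test_upsampling("NCHW", "NEAREST_NEIGHBOR")
_test_upsampling("NHWC", "BILINEAR")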