relay.Span
Example
import tvm
from tvm import relay
span = relay.Span(None, 1, 2, 3, 4)
assert span.source_name is None
assert span.line == 1
assert span.end_line == 2
assert span.column == 3
assert span.end_column == 4
assert span.same_as(span)
assert span == span
assert isinstance(span, relay.base.Span)
str(span)
'Span((nullptr), 1, 2, 3, 4)'
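For comparison, a minimal sketch of a span that carries an explicit relay.SourceName (the file name "demo.relay" is made up for illustration):
named = relay.Span(relay.SourceName("demo.relay"), 1, 2, 3, 4)
assert named.source_name.name == "demo.relay"
str(named)  # expected to show the source name instead of (nullptr)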
back = tvm.ir.load_json(tvm.ir.save_json(span))
assert back.source_name == span.source_name
assert back.line == span.line
assert back.end_line == span.end_line
assert back.column == span.column
assert back.end_column == span.end_column
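The save_json/load_json round trip should preserve a named source as well; a sketch reusing the named span from above:
named_back = tvm.ir.load_json(tvm.ir.save_json(named))
assert named_back.source_name.name == named.source_name.name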
import tvm.testing  # provides tvm.testing.enabled_targets(), used below
from tvm.relay import testing
from tvm.relay import Expr
def astext(program, unify_free_vars=False):
    """Print `program` as text and check that it round-trips through the parser."""
    text = program.astext()
    if isinstance(program, Expr):
        roundtrip_program = tvm.relay.parse_expr(text)
    else:
        roundtrip_program = tvm.relay.fromtext(text)
    tvm.ir.assert_structural_equal(roundtrip_program, program, map_free_vars=True)
    return text
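As a quick sanity check of the helper before the larger example (a sketch; the identity function is arbitrary):
v = relay.var("v", shape=(1,))
print(astext(relay.Function([v], v)))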
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.Call(
    z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add0"), 0, 0, 0, 0)
)
z = relay.add(z, z)
z = relay.Call(
    z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add1"), 0, 0, 0, 0)
)
f = relay.Function([x, y], z)
txt = astext(f)
assert "Add0" in txt
assert "Add1" in txt
# Reference: https://github.com/apache/tvm/blob/main/tests/python/relay/test_pass_annotate_spans_defuse.py
data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
simple_net = relay.nn.conv2d(
    data=data, weight=weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
)
simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0]
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
module, params = relay.testing.create_workload(simple_net)
print(module)
def @main(%data: Tensor[(1, 3, 64, 64), float32] /* ty=Tensor[(1, 3, 64, 64), float32] */, %weight: Tensor[(3, 3, 3, 3), float32] /* ty=Tensor[(3, 3, 3, 3), float32] */, %bn_gamma: Tensor[(3), float32] /* ty=Tensor[(3), float32] */, %bn_beta: Tensor[(3), float32] /* ty=Tensor[(3), float32] */, %bn_mean: Tensor[(3), float32] /* ty=Tensor[(3), float32] */, %bn_var: Tensor[(3), float32] /* ty=Tensor[(3), float32] */) -> Tensor[(1, 3, 64, 64), float32] {
  %0 = nn.conv2d(%data, %weight, padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3]) /* ty=Tensor[(1, 3, 64, 64), float32] */;
  %1 = nn.batch_norm(%0, %bn_gamma, %bn_beta, %bn_mean, %bn_var) /* ty=(Tensor[(1, 3, 64, 64), float32], Tensor[(3), float32], Tensor[(3), float32]) */;
  %1.0 /* ty=Tensor[(1, 3, 64, 64), float32] */
}
# Apply some simple passes to legalize the IR.
with tvm.transform.PassContext(opt_level=0):
    module, params = relay.optimize(
        module, target=tvm.testing.enabled_targets()[0][0], params=params
    )
seq = tvm.transform.Sequential(
    [relay.transform.AnnotateSpans(), relay.transform.DefuseOps()]
)
with tvm.transform.PassContext(opt_level=3):
    module = seq(module)
print(module)
def @main(%data {virtual_device=VirtualDevice(device_type=1, virtual_device_id=0, target=Target(id=39d9420, kind='llvm', keys={'cpu'}, host=Target(id=396cb40, kind='llvm', keys={'cpu'})))}: Tensor[(1, 3, 64, 64), float32] /* ty=Tensor[(1, 3, 64, 64), float32] span=GeneratedSource:21:11 */, hash="145b385fdff2c9c3", virtual_device=VirtualDevice(device_type=1, virtual_device_id=0, target=Target(id=39d9420, kind='llvm', keys={'cpu'}, host=Target(id=396cb40, kind='llvm', keys={'cpu'})))) -> Tensor[(1, 3, 64, 64), float32] {
  %0 = add(meta[relay.Constant][1] /* ty=Tensor[(3), float32] span=GeneratedSource:9:16 */, 1e-05f /* ty=float32 span=GeneratedSource:9:72 */) /* ty=Tensor[(3), float32] span=GeneratedSource:7:5 */;
  %1 = rsqrt(%0) /* ty=Tensor[(3), float32] span=GeneratedSource:11:5 */;
  %2 = multiply(%1, meta[relay.Constant][2] /* ty=Tensor[(3), float32] span=GeneratedSource:17:20 */) /* ty=Tensor[(3), float32] span=GeneratedSource:15:6 */;
  %3 = nn.conv2d(%data, meta[relay.Constant][0] /* ty=Tensor[(3, 3, 3, 3), float32] span=GeneratedSource:21:23 */, padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3]) /* ty=Tensor[(1, 3, 64, 64), float32] span=GeneratedSource:4:5 */;
  %4 = expand_dims(%2, axis=1, num_newaxis=2) /* ty=Tensor[(3, 1, 1), float32] span=GeneratedSource:19:5 */;
  %5 = negative(meta[relay.Constant][3] /* ty=Tensor[(3), float32] span=GeneratedSource:29:18 */) /* ty=Tensor[(3), float32] span=GeneratedSource:27:5 */;
  %6 = multiply(%5, %2) /* ty=Tensor[(3), float32] span=GeneratedSource:31:6 */;
  %7 = add(%6, meta[relay.Constant][4] /* ty=Tensor[(3), float32] span=GeneratedSource:37:23 */) /* ty=Tensor[(3), float32] span=GeneratedSource:35:5 */;
  %8 = multiply(%3, %4) /* ty=Tensor[(1, 3, 64, 64), float32] span=GeneratedSource:24:6 */;
  %9 = expand_dims(%7, axis=1, num_newaxis=2) /* ty=Tensor[(3, 1, 1), float32] span=GeneratedSource:39:5 */;
  add(%8, %9) /* ty=Tensor[(1, 3, 64, 64), float32] span=GeneratedSource:44:5 */
}
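After AnnotateSpans every sub-expression points into GeneratedSource, and DefuseOps carries the annotations along. As a final sketch, the spans can be collected with relay.analysis.post_order_visit (only Call nodes whose callee is an operator are inspected):
spans = []

def visit(node):
    # Record (operator name, span) for each annotated call.
    if isinstance(node, relay.Call) and isinstance(node.op, tvm.ir.Op) and node.span is not None:
        spans.append((node.op.name, str(node.span)))

relay.analysis.post_order_visit(module["main"].body, visit)
print(spans)  # each entry should reference GeneratedSource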