AnnotateSpans

tvm.relay.transform.AnnotateSpans() annotates a program with span information. It does so by generating the program's textual representation and parsing that text back into a Relay abstract syntax tree (AST): pretty-printing the module and re-parsing it establishes spans and sources for every Relay sub-expression. This improves downstream error reporting and debugging diagnostics for modules that were constructed programmatically.
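
The pass is conceptually just a pretty-print/parse round trip. The sketch below reproduces that round trip by hand, assuming IRModule.astext and tvm.parser.parse are available in your TVM build; it only illustrates the mechanism and is not a substitute for the pass itself.

import tvm
from tvm import relay

# A module built programmatically carries no span information yet.
x = relay.var("x", shape=(2, 2), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))

# Roughly what AnnotateSpans does internally: pretty-print the module, then
# parse the text back so every sub-expression records where it came from.
# (The pass itself uses "GeneratedSource" as the source name, as seen below.)
text = mod.astext()
roundtripped = tvm.parser.parse(text)  # assumption: tvm.parser.parse exists in this build

span = roundtripped["main"].body.span  # the nn.relu call now carries a span
print(span.source_name.name, span.line, span.column)

The example below applies the pass to a ResNet-18 model imported from PyTorch: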

import torch
import torchvision
from tvm import relay
model = torchvision.models.resnet18().eval()
inp = torch.randn([1, 3, 224, 224])
trace = torch.jit.trace(model, inp).eval()
mod, _ = relay.frontend.from_pytorch(
    trace, [("input", inp.shape)], use_parser_friendly_name=True
)
mod = relay.transform.AnnotateSpans()(mod)
print(mod["main"])
fn (%input: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=GeneratedSource:116:18 */, %aten___convolution_0_weight: Tensor[(64, 3, 7, 7), float32] /* ty=Tensor[(64, 3, 7, 7), float32] span=GeneratedSource:116:28 */, %aten__batch_norm_0_weight: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:117:26 */, %aten__batch_norm_0_bias: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:117:54 */, %aten__batch_norm_0_mean: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:117:80 */, %aten__batch_norm_0_var: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:117:106 */, %aten___convolution_1_weight: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] span=GeneratedSource:121:22 */, %aten__batch_norm_1_weight: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:122:26 */, %aten__batch_norm_1_bias: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:122:54 */, %aten__batch_norm_1_mean: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:122:80 */, %aten__batch_norm_1_var: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:122:106 */, %aten___convolution_2_weight: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] span=GeneratedSource:125:22 */, %aten__batch_norm_2_weight: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:126:27 */, %aten__batch_norm_2_bias: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:126:55 */, %aten__batch_norm_2_mean: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:126:81 */, %aten__batch_norm_2_var: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:126:107 */, %aten___convolution_3_weight: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] span=GeneratedSource:130:24 */, %aten__batch_norm_3_weight: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:131:28 */, %aten__batch_norm_3_bias: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:131:56 */, %aten__batch_norm_3_mean: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:131:82 */, %aten__batch_norm_3_var: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:131:108 */, %aten___convolution_4_weight: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] span=GeneratedSource:134:24 */, %aten__batch_norm_4_weight: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:135:28 */, %aten__batch_norm_4_bias: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:135:56 */, %aten__batch_norm_4_mean: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:135:82 */, %aten__batch_norm_4_var: Tensor[(64), float32] /* ty=Tensor[(64), float32] span=GeneratedSource:135:108 */, %aten___convolution_5_weight: Tensor[(128, 64, 3, 3), float32] /* ty=Tensor[(128, 64, 3, 3), float32] span=GeneratedSource:139:24 */, %aten__batch_norm_5_weight: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:140:28 */, %aten__batch_norm_5_bias: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:140:56 */, %aten__batch_norm_5_mean: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:140:82 */, %aten__batch_norm_5_var: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:140:108 */, %aten___convolution_6_weight: 
Tensor[(128, 128, 3, 3), float32] /* ty=Tensor[(128, 128, 3, 3), float32] span=GeneratedSource:143:24 */, %aten__batch_norm_6_weight: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:144:28 */, %aten__batch_norm_6_bias: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:144:56 */, %aten__batch_norm_6_mean: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:144:82 */, %aten__batch_norm_6_var: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:144:108 */, %aten___convolution_7_weight: Tensor[(128, 64, 1, 1), float32] /* ty=Tensor[(128, 64, 1, 1), float32] span=GeneratedSource:145:24 */, %aten__batch_norm_7_weight: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:146:28 */, %aten__batch_norm_7_bias: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:146:56 */, %aten__batch_norm_7_mean: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:146:82 */, %aten__batch_norm_7_var: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:146:108 */, %aten___convolution_8_weight: Tensor[(128, 128, 3, 3), float32] /* ty=Tensor[(128, 128, 3, 3), float32] span=GeneratedSource:151:24 */, %aten__batch_norm_8_weight: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:152:28 */, %aten__batch_norm_8_bias: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:152:56 */, %aten__batch_norm_8_mean: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:152:82 */, %aten__batch_norm_8_var: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:152:108 */, %aten___convolution_9_weight: Tensor[(128, 128, 3, 3), float32] /* ty=Tensor[(128, 128, 3, 3), float32] span=GeneratedSource:155:24 */, %aten__batch_norm_9_weight: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:156:28 */, %aten__batch_norm_9_bias: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:156:56 */, %aten__batch_norm_9_mean: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:156:82 */, %aten__batch_norm_9_var: Tensor[(128), float32] /* ty=Tensor[(128), float32] span=GeneratedSource:156:108 */, %aten___convolution_10_weight: Tensor[(256, 128, 3, 3), float32] /* ty=Tensor[(256, 128, 3, 3), float32] span=GeneratedSource:160:24 */, %aten__batch_norm_10_weight: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:161:28 */, %aten__batch_norm_10_bias: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:161:57 */, %aten__batch_norm_10_mean: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:161:84 */, %aten__batch_norm_10_var: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:161:111 */, %aten___convolution_11_weight: Tensor[(256, 256, 3, 3), float32] /* ty=Tensor[(256, 256, 3, 3), float32] span=GeneratedSource:164:24 */, %aten__batch_norm_11_weight: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:165:28 */, %aten__batch_norm_11_bias: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:165:57 */, %aten__batch_norm_11_mean: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:165:84 */, %aten__batch_norm_11_var: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:165:111 */, %aten___convolution_12_weight: Tensor[(256, 128, 1, 1), float32] /* ty=Tensor[(256, 128, 1, 1), float32] 
span=GeneratedSource:166:24 */, %aten__batch_norm_12_weight: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:167:28 */, %aten__batch_norm_12_bias: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:167:57 */, %aten__batch_norm_12_mean: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:167:84 */, %aten__batch_norm_12_var: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:167:111 */, %aten___convolution_13_weight: Tensor[(256, 256, 3, 3), float32] /* ty=Tensor[(256, 256, 3, 3), float32] span=GeneratedSource:172:24 */, %aten__batch_norm_13_weight: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:173:28 */, %aten__batch_norm_13_bias: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:173:57 */, %aten__batch_norm_13_mean: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:173:84 */, %aten__batch_norm_13_var: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:173:111 */, %aten___convolution_14_weight: Tensor[(256, 256, 3, 3), float32] /* ty=Tensor[(256, 256, 3, 3), float32] span=GeneratedSource:176:24 */, %aten__batch_norm_14_weight: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:177:28 */, %aten__batch_norm_14_bias: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:177:57 */, %aten__batch_norm_14_mean: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:177:84 */, %aten__batch_norm_14_var: Tensor[(256), float32] /* ty=Tensor[(256), float32] span=GeneratedSource:177:111 */, %aten___convolution_15_weight: Tensor[(512, 256, 3, 3), float32] /* ty=Tensor[(512, 256, 3, 3), float32] span=GeneratedSource:181:24 */, %aten__batch_norm_15_weight: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:182:28 */, %aten__batch_norm_15_bias: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:182:57 */, %aten__batch_norm_15_mean: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:182:84 */, %aten__batch_norm_15_var: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:182:111 */, %aten___convolution_16_weight: Tensor[(512, 512, 3, 3), float32] /* ty=Tensor[(512, 512, 3, 3), float32] span=GeneratedSource:185:24 */, %aten__batch_norm_16_weight: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:186:28 */, %aten__batch_norm_16_bias: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:186:57 */, %aten__batch_norm_16_mean: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:186:84 */, %aten__batch_norm_16_var: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:186:111 */, %aten___convolution_17_weight: Tensor[(512, 256, 1, 1), float32] /* ty=Tensor[(512, 256, 1, 1), float32] span=GeneratedSource:187:24 */, %aten__batch_norm_17_weight: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:188:28 */, %aten__batch_norm_17_bias: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:188:57 */, %aten__batch_norm_17_mean: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:188:84 */, %aten__batch_norm_17_var: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:188:111 */, %aten___convolution_18_weight: Tensor[(512, 512, 3, 3), float32] /* ty=Tensor[(512, 512, 3, 3), float32] span=GeneratedSource:193:24 */, 
%aten__batch_norm_18_weight: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:194:28 */, %aten__batch_norm_18_bias: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:194:57 */, %aten__batch_norm_18_mean: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:194:84 */, %aten__batch_norm_18_var: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:194:111 */, %aten___convolution_19_weight: Tensor[(512, 512, 3, 3), float32] /* ty=Tensor[(512, 512, 3, 3), float32] span=GeneratedSource:197:24 */, %aten__batch_norm_19_weight: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:198:28 */, %aten__batch_norm_19_bias: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:198:57 */, %aten__batch_norm_19_mean: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:198:84 */, %aten__batch_norm_19_var: Tensor[(512), float32] /* ty=Tensor[(512), float32] span=GeneratedSource:198:111 */, %aten__linear_0_weight: Tensor[(1000, 512), float32] /* ty=Tensor[(1000, 512), float32] span=GeneratedSource:205:23 */, %aten__linear_0_bias: Tensor[(1000), float32] /* ty=Tensor[(1000), float32] span=GeneratedSource:206:20 */) -> Tensor[(1, 1000), float32] {
  %0 = nn.conv2d(%input, %aten___convolution_0_weight, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] span=GeneratedSource:117:22 */;
  %1 = nn.batch_norm(%0, %aten__batch_norm_0_weight, %aten__batch_norm_0_bias, %aten__batch_norm_0_mean, %aten__batch_norm_0_var) /* ty=(Tensor[(1, 64, 112, 112), float32], Tensor[(64), float32], Tensor[(64), float32]) span=GeneratedSource:118:8 */;
  %2 = %1.0 /* ty=Tensor[(1, 64, 112, 112), float32] span=GeneratedSource:119:16 */;
  %3 = nn.relu(%2) /* ty=Tensor[(1, 64, 112, 112), float32] span=GeneratedSource:120:23 */;
  %4 = nn.max_pool2d(%3, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:128:18 */;
  %5 = nn.conv2d(%4, %aten___convolution_1_weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:122:22 */;
  %6 = nn.batch_norm(%5, %aten__batch_norm_1_weight, %aten__batch_norm_1_bias, %aten__batch_norm_1_mean, %aten__batch_norm_1_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) span=GeneratedSource:123:8 */;
  %7 = %6.0 /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:124:16 */;
  %8 = nn.relu(%7) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:125:18 */;
  %9 = nn.conv2d(%8, %aten___convolution_2_weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:126:23 */;
  %10 = nn.batch_norm(%9, %aten__batch_norm_2_weight, %aten__batch_norm_2_bias, %aten__batch_norm_2_mean, %aten__batch_norm_2_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) span=GeneratedSource:127:9 */;
  %11 = %10.0 /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:128:13 */;
  %12 = add(%11, %4) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:129:17 */;
  %13 = nn.relu(%12) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:137:18 */;
  %14 = nn.conv2d(%13, %aten___convolution_3_weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:131:23 */;
  %15 = nn.batch_norm(%14, %aten__batch_norm_3_weight, %aten__batch_norm_3_bias, %aten__batch_norm_3_mean, %aten__batch_norm_3_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) span=GeneratedSource:132:9 */;
  %16 = %15.0 /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:133:17 */;
  %17 = nn.relu(%16) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:134:19 */;
  %18 = nn.conv2d(%17, %aten___convolution_4_weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:135:23 */;
  %19 = nn.batch_norm(%18, %aten__batch_norm_4_weight, %aten__batch_norm_4_bias, %aten__batch_norm_4_mean, %aten__batch_norm_4_var) /* ty=(Tensor[(1, 64, 56, 56), float32], Tensor[(64), float32], Tensor[(64), float32]) span=GeneratedSource:136:9 */;
  %20 = %19.0 /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:137:13 */;
  %21 = add(%20, %13) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:138:17 */;
  %22 = nn.relu(%21) /* ty=Tensor[(1, 64, 56, 56), float32] span=GeneratedSource:145:19 */;
  %23 = nn.conv2d(%22, %aten___convolution_5_weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:140:23 */;
  %24 = nn.batch_norm(%23, %aten__batch_norm_5_weight, %aten__batch_norm_5_bias, %aten__batch_norm_5_mean, %aten__batch_norm_5_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) span=GeneratedSource:141:9 */;
  %25 = %24.0 /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:142:17 */;
  %26 = nn.relu(%25) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:143:19 */;
  %27 = nn.conv2d(%26, %aten___convolution_6_weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:144:23 */;
  %28 = nn.batch_norm(%27, %aten__batch_norm_6_weight, %aten__batch_norm_6_bias, %aten__batch_norm_6_mean, %aten__batch_norm_6_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) span=GeneratedSource:147:9 */;
  %29 = nn.conv2d(%22, %aten___convolution_7_weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:146:23 */;
  %30 = nn.batch_norm(%29, %aten__batch_norm_7_weight, %aten__batch_norm_7_bias, %aten__batch_norm_7_mean, %aten__batch_norm_7_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) span=GeneratedSource:148:9 */;
  %31 = %28.0 /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:149:13 */;
  %32 = %30.0 /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:149:18 */;
  %33 = add(%31, %32) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:150:17 */;
  %34 = nn.relu(%33) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:158:18 */;
  %35 = nn.conv2d(%34, %aten___convolution_8_weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:152:23 */;
  %36 = nn.batch_norm(%35, %aten__batch_norm_8_weight, %aten__batch_norm_8_bias, %aten__batch_norm_8_mean, %aten__batch_norm_8_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) span=GeneratedSource:153:9 */;
  %37 = %36.0 /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:154:17 */;
  %38 = nn.relu(%37) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:155:19 */;
  %39 = nn.conv2d(%38, %aten___convolution_9_weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:156:23 */;
  %40 = nn.batch_norm(%39, %aten__batch_norm_9_weight, %aten__batch_norm_9_bias, %aten__batch_norm_9_mean, %aten__batch_norm_9_var) /* ty=(Tensor[(1, 128, 28, 28), float32], Tensor[(128), float32], Tensor[(128), float32]) span=GeneratedSource:157:9 */;
  %41 = %40.0 /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:158:13 */;
  %42 = add(%41, %34) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:159:17 */;
  %43 = nn.relu(%42) /* ty=Tensor[(1, 128, 28, 28), float32] span=GeneratedSource:166:19 */;
  %44 = nn.conv2d(%43, %aten___convolution_10_weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:161:23 */;
  %45 = nn.batch_norm(%44, %aten__batch_norm_10_weight, %aten__batch_norm_10_bias, %aten__batch_norm_10_mean, %aten__batch_norm_10_var) /* ty=(Tensor[(1, 256, 14, 14), float32], Tensor[(256), float32], Tensor[(256), float32]) span=GeneratedSource:162:9 */;
  %46 = %45.0 /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:163:17 */;
  %47 = nn.relu(%46) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:164:19 */;
  %48 = nn.conv2d(%47, %aten___convolution_11_weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:165:23 */;
  %49 = nn.batch_norm(%48, %aten__batch_norm_11_weight, %aten__batch_norm_11_bias, %aten__batch_norm_11_mean, %aten__batch_norm_11_var) /* ty=(Tensor[(1, 256, 14, 14), float32], Tensor[(256), float32], Tensor[(256), float32]) span=GeneratedSource:168:9 */;
  %50 = nn.conv2d(%43, %aten___convolution_12_weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:167:23 */;
  %51 = nn.batch_norm(%50, %aten__batch_norm_12_weight, %aten__batch_norm_12_bias, %aten__batch_norm_12_mean, %aten__batch_norm_12_var) /* ty=(Tensor[(1, 256, 14, 14), float32], Tensor[(256), float32], Tensor[(256), float32]) span=GeneratedSource:169:9 */;
  %52 = %49.0 /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:170:13 */;
  %53 = %51.0 /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:170:18 */;
  %54 = add(%52, %53) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:171:17 */;
  %55 = nn.relu(%54) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:179:18 */;
  %56 = nn.conv2d(%55, %aten___convolution_13_weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:173:23 */;
  %57 = nn.batch_norm(%56, %aten__batch_norm_13_weight, %aten__batch_norm_13_bias, %aten__batch_norm_13_mean, %aten__batch_norm_13_var) /* ty=(Tensor[(1, 256, 14, 14), float32], Tensor[(256), float32], Tensor[(256), float32]) span=GeneratedSource:174:9 */;
  %58 = %57.0 /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:175:17 */;
  %59 = nn.relu(%58) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:176:19 */;
  %60 = nn.conv2d(%59, %aten___convolution_14_weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:177:23 */;
  %61 = nn.batch_norm(%60, %aten__batch_norm_14_weight, %aten__batch_norm_14_bias, %aten__batch_norm_14_mean, %aten__batch_norm_14_var) /* ty=(Tensor[(1, 256, 14, 14), float32], Tensor[(256), float32], Tensor[(256), float32]) span=GeneratedSource:178:9 */;
  %62 = %61.0 /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:179:13 */;
  %63 = add(%62, %55) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:180:17 */;
  %64 = nn.relu(%63) /* ty=Tensor[(1, 256, 14, 14), float32] span=GeneratedSource:187:19 */;
  %65 = nn.conv2d(%64, %aten___convolution_15_weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:182:23 */;
  %66 = nn.batch_norm(%65, %aten__batch_norm_15_weight, %aten__batch_norm_15_bias, %aten__batch_norm_15_mean, %aten__batch_norm_15_var) /* ty=(Tensor[(1, 512, 7, 7), float32], Tensor[(512), float32], Tensor[(512), float32]) span=GeneratedSource:183:9 */;
  %67 = %66.0 /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:184:17 */;
  %68 = nn.relu(%67) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:185:19 */;
  %69 = nn.conv2d(%68, %aten___convolution_16_weight, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:186:23 */;
  %70 = nn.batch_norm(%69, %aten__batch_norm_16_weight, %aten__batch_norm_16_bias, %aten__batch_norm_16_mean, %aten__batch_norm_16_var) /* ty=(Tensor[(1, 512, 7, 7), float32], Tensor[(512), float32], Tensor[(512), float32]) span=GeneratedSource:189:9 */;
  %71 = nn.conv2d(%64, %aten___convolution_17_weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:188:23 */;
  %72 = nn.batch_norm(%71, %aten__batch_norm_17_weight, %aten__batch_norm_17_bias, %aten__batch_norm_17_mean, %aten__batch_norm_17_var) /* ty=(Tensor[(1, 512, 7, 7), float32], Tensor[(512), float32], Tensor[(512), float32]) span=GeneratedSource:190:9 */;
  %73 = %70.0 /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:191:13 */;
  %74 = %72.0 /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:191:18 */;
  %75 = add(%73, %74) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:192:17 */;
  %76 = nn.relu(%75) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:200:18 */;
  %77 = nn.conv2d(%76, %aten___convolution_18_weight, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:194:23 */;
  %78 = nn.batch_norm(%77, %aten__batch_norm_18_weight, %aten__batch_norm_18_bias, %aten__batch_norm_18_mean, %aten__batch_norm_18_var) /* ty=(Tensor[(1, 512, 7, 7), float32], Tensor[(512), float32], Tensor[(512), float32]) span=GeneratedSource:195:9 */;
  %79 = %78.0 /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:196:17 */;
  %80 = nn.relu(%79) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:197:19 */;
  %81 = nn.conv2d(%80, %aten___convolution_19_weight, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:198:23 */;
  %82 = nn.batch_norm(%81, %aten__batch_norm_19_weight, %aten__batch_norm_19_bias, %aten__batch_norm_19_mean, %aten__batch_norm_19_var) /* ty=(Tensor[(1, 512, 7, 7), float32], Tensor[(512), float32], Tensor[(512), float32]) span=GeneratedSource:199:9 */;
  %83 = %82.0 /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:200:13 */;
  %84 = add(%83, %76) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:201:17 */;
  %85 = nn.relu(%84) /* ty=Tensor[(1, 512, 7, 7), float32] span=GeneratedSource:202:32 */;
  %86 = nn.adaptive_avg_pool2d(%85, output_size=[1, 1]) /* ty=Tensor[(1, 512, 1, 1), float32] span=GeneratedSource:203:17 */;
  %87 = reshape(%86, newshape=[0, -1, 1, 1]) /* ty=Tensor[(1, 512, 1, 1), float32] span=GeneratedSource:204:17 */;
  %88 = squeeze(%87, axis=[2, 3]) /* ty=Tensor[(1, 512), float32] span=GeneratedSource:205:18 */;
  %89 = nn.dense(%88, %aten__linear_0_weight, units=None) /* ty=Tensor[(1, 1000), float32] span=GeneratedSource:206:15 */;
  nn.bias_add(%89, %aten__linear_0_bias, axis=-1) /* ty=Tensor[(1, 1000), float32] span=GeneratedSource:116:3 */
} /* ty=fn (Tensor[(1, 3, 224, 224), float32], Tensor[(64, 3, 7, 7), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(64), float32], Tensor[(128, 64, 3, 3), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128, 128, 3, 3), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128, 64, 1, 1), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128, 128, 3, 3), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128, 128, 3, 3), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(128), float32], Tensor[(256, 128, 3, 3), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256, 256, 3, 3), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256, 128, 1, 1), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256, 256, 3, 3), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256, 256, 3, 3), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(256), float32], Tensor[(512, 256, 3, 3), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512, 512, 3, 3), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512, 256, 1, 1), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512, 512, 3, 3), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512, 512, 3, 3), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(512), float32], Tensor[(1000, 512), float32], Tensor[(1000), float32]) -> Tensor[(1, 1000), float32] span=GeneratedSource:115:1 */
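
The spans are not only visible in the printed text; they can also be read programmatically. This short sketch assumes it runs right after the example above, so that mod is the annotated module returned by AnnotateSpans:

body = mod["main"].body                # the final nn.bias_add call in the output above
span = body.span
print(span.source_name.name)           # GeneratedSource
print(span.line, span.column)          # start position in the generated text
print(span.end_line, span.end_column)  # end position

The next, much smaller example applies the pass to a hand-built function that clips with infinite bounds: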
import numpy as np
import tvm
from tvm import relay
x = relay.var("x", shape=(3, 4), dtype="float32")
y = relay.clip(x, -np.inf, np.inf)

f = relay.Function([x], y)
mod = tvm.IRModule.from_expr(f)

mod = relay.transform.AnnotateSpans()(mod)
mod.show()
def @main(%x: Tensor[(3, 4), float32] /* ty=Tensor[(3, 4), float32] span=GeneratedSource:3:8 */) -> Tensor[(3, 4), float32] {
  clip(%x, a_min=-inff, a_max=inff) /* ty=Tensor[(3, 4), float32] span=GeneratedSource:3:3 */
}
import tvm
import tvm.relay as relay
from tvm.relay import testing
import tvm.testing


def test_annotate_spans_compatibility():
    data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
    weight = relay.var("weight")

    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")

    simple_net = relay.nn.conv2d(
        data=data, weight=weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
    )
    simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0]
    simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)

    module, params = testing.create_workload(simple_net)

    # Apply some simple passes to legalize the IR.
    with tvm.transform.PassContext(opt_level=0):
        module, params = relay.optimize(
            module, target=tvm.testing.enabled_targets()[0][0], params=params
        )

    seq = tvm.transform.Sequential([relay.transform.AnnotateSpans(), relay.transform.DefuseOps()])
    with tvm.transform.PassContext(opt_level=3):
        module = seq(module)

test_annotate_spans_compatibility()
[11:36:37] /media/pc/data/lxw/ai/tvm/src/target/target_kind.cc:171: Warning: Unable to detect CUDA version, default to "-arch=sm_50" instead
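
Because AnnotateSpans only needs the module itself, it can be placed at any point in a pass pipeline, for example after optimization passes, to (re-)establish span information. The sketch below is a minimal, self-contained illustration of that pattern; the toy function and the FoldConstant pass are arbitrary choices:

import tvm
from tvm import relay

x = relay.var("x", shape=(1, 8), dtype="float32")
y = relay.nn.relu(relay.add(x, relay.const(1.0, "float32")))
mod = tvm.IRModule.from_expr(relay.Function([x], y))

seq = tvm.transform.Sequential(
    [
        relay.transform.InferType(),
        relay.transform.FoldConstant(),   # an arbitrary optimization pass
        relay.transform.AnnotateSpans(),  # (re-)establish spans afterwards
    ]
)
with tvm.transform.PassContext(opt_level=3):
    mod = seq(mod)

assert mod["main"].body.span is not None
print(mod["main"].body.span.source_name.name)  # GeneratedSource

Finally, AnnotateSpans behaves like any other pass under pass instrumentation; the test below checks that it shows up in PassTimingInstrument profiles: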
import tvm
import tvm.relay
from tvm.relay import op
from tvm.ir.instrument import PassTimingInstrument

def get_test_model():
    x, y, z = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xyz"]
    e1 = op.add(x, y)
    e2 = op.subtract(x, z)
    e3 = op.multiply(e1, e1 / e2)
    return tvm.IRModule.from_expr(e3 + e2)

def test_pass_timing_instrument():
    pass_timing = PassTimingInstrument()

    # Override current PassContext's instruments
    tvm.transform.PassContext.current().override_instruments([pass_timing])

    mod = get_test_model()
    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.ToANormalForm()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    profiles = pass_timing.render()
    assert "AnnotateSpans" in profiles
    assert "ToANormalForm" in profiles
    assert "InferType" in profiles

    # Reset current PassContext's instruments to None
    tvm.transform.PassContext.current().override_instruments(None)

    mod = get_test_model()
    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.ToANormalForm()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    profiles = pass_timing.render()
    assert profiles == ""

test_pass_timing_instrument()
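
override_instruments modifies the current PassContext in place. A more self-contained pattern is to attach the instrument to a scoped PassContext; the sketch below reuses get_test_model from above and collects the timing report before the context exits:

pass_timing = PassTimingInstrument()
mod = get_test_model()
with tvm.transform.PassContext(instruments=[pass_timing]):
    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.InferType()(mod)
    # Render the report while the instrument is still active.
    profiles = pass_timing.render()
print(profiles)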