PyTorch resnet18 Relay

PyTorch resnet18 Relay

# Project-local helper that sets up the TVM environment; must be imported first.
import set_env
import torch
from torchvision import models
import tvm
from tvm import relay

# Create the PyTorch model: pretrained ResNet-18, switched to inference mode.
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT).eval()
# NCHW input layout: batch 1, 3 channels, 224x224 spatial size.
shape = [1, 3, 224, 224]
# (input_name, input_shape) pairs expected by relay.frontend.from_pytorch.
input_infos = [("data", shape)]
# Convert to a Relay module: trace the model into TorchScript with a dummy
# input (no_grad avoids recording autograd state), then import the traced
# graph into Relay, yielding the module and its parameter dict.
with torch.no_grad():
    torch_model = torch.jit.trace(model, torch.randn(shape)).eval()
    mod, params = relay.frontend.from_pytorch(torch_model, input_infos)
# Bind the parameters into the module and run the pre-quantization
# optimization passes (the printed IR below shows BatchNorm already folded
# into conv2d + add of per-channel constants).
with tvm.transform.PassContext(opt_level=3):
    mod = relay.quantize.prerequisite_optimize(mod, params)
print(mod["main"])
fn (%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 1000), float32] {
  %0 = nn.conv2d(%data, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
  %1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 112, 112), float32] */;
  %2 = nn.relu(%1) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */;
  %3 = nn.max_pool2d(%2, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
  %4 = nn.conv2d(%3, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %5 = add(%4, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %6 = nn.relu(%5) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */;
  %7 = nn.conv2d(%6, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %8 = add(%7, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %9 = add(%8, %3) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */;
  %10 = nn.relu(%9) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__2:0:0 */;
  %11 = nn.conv2d(%10, meta[relay.Constant][6] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %12 = add(%11, meta[relay.Constant][7] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %13 = nn.relu(%12) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__3:0:0 */;
  %14 = nn.conv2d(%13, meta[relay.Constant][8] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %15 = add(%14, meta[relay.Constant][9] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
  %16 = add(%15, %10) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__1:0:0 */;
  %17 = nn.relu(%16) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__4:0:0 */;
  %18 = nn.conv2d(%17, meta[relay.Constant][10] /* ty=Tensor[(128, 64, 3, 3), float32] */, strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %19 = add(%18, meta[relay.Constant][11] /* ty=Tensor[(128, 1, 1), float32] */) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %20 = nn.relu(%19) /* ty=Tensor[(1, 128, 28, 28), float32] span=aten::relu__5:0:0 */;
  %21 = nn.conv2d(%20, meta[relay.Constant][12] /* ty=Tensor[(128, 128, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %22 = nn.conv2d(%17, meta[relay.Constant][14] /* ty=Tensor[(128, 64, 1, 1), float32] */, strides=[2, 2], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %23 = add(%21, meta[relay.Constant][13] /* ty=Tensor[(128, 1, 1), float32] */) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %24 = add(%22, meta[relay.Constant][15] /* ty=Tensor[(128, 1, 1), float32] */) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %25 = add(%23, %24) /* ty=Tensor[(1, 128, 28, 28), float32] span=aten::add__2:0:0 */;
  %26 = nn.relu(%25) /* ty=Tensor[(1, 128, 28, 28), float32] span=aten::relu__6:0:0 */;
  %27 = nn.conv2d(%26, meta[relay.Constant][16] /* ty=Tensor[(128, 128, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %28 = add(%27, meta[relay.Constant][17] /* ty=Tensor[(128, 1, 1), float32] */) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %29 = nn.relu(%28) /* ty=Tensor[(1, 128, 28, 28), float32] span=aten::relu__7:0:0 */;
  %30 = nn.conv2d(%29, meta[relay.Constant][18] /* ty=Tensor[(128, 128, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %31 = add(%30, meta[relay.Constant][19] /* ty=Tensor[(128, 1, 1), float32] */) /* ty=Tensor[(1, 128, 28, 28), float32] */;
  %32 = add(%31, %26) /* ty=Tensor[(1, 128, 28, 28), float32] span=aten::add__3:0:0 */;
  %33 = nn.relu(%32) /* ty=Tensor[(1, 128, 28, 28), float32] span=aten::relu__8:0:0 */;
  %34 = nn.conv2d(%33, meta[relay.Constant][20] /* ty=Tensor[(256, 128, 3, 3), float32] */, strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %35 = add(%34, meta[relay.Constant][21] /* ty=Tensor[(256, 1, 1), float32] */) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %36 = nn.relu(%35) /* ty=Tensor[(1, 256, 14, 14), float32] span=aten::relu__9:0:0 */;
  %37 = nn.conv2d(%36, meta[relay.Constant][22] /* ty=Tensor[(256, 256, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %38 = nn.conv2d(%33, meta[relay.Constant][24] /* ty=Tensor[(256, 128, 1, 1), float32] */, strides=[2, 2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %39 = add(%37, meta[relay.Constant][23] /* ty=Tensor[(256, 1, 1), float32] */) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %40 = add(%38, meta[relay.Constant][25] /* ty=Tensor[(256, 1, 1), float32] */) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %41 = add(%39, %40) /* ty=Tensor[(1, 256, 14, 14), float32] span=aten::add__4:0:0 */;
  %42 = nn.relu(%41) /* ty=Tensor[(1, 256, 14, 14), float32] span=aten::relu__10:0:0 */;
  %43 = nn.conv2d(%42, meta[relay.Constant][26] /* ty=Tensor[(256, 256, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %44 = add(%43, meta[relay.Constant][27] /* ty=Tensor[(256, 1, 1), float32] */) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %45 = nn.relu(%44) /* ty=Tensor[(1, 256, 14, 14), float32] span=aten::relu__11:0:0 */;
  %46 = nn.conv2d(%45, meta[relay.Constant][28] /* ty=Tensor[(256, 256, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %47 = add(%46, meta[relay.Constant][29] /* ty=Tensor[(256, 1, 1), float32] */) /* ty=Tensor[(1, 256, 14, 14), float32] */;
  %48 = add(%47, %42) /* ty=Tensor[(1, 256, 14, 14), float32] span=aten::add__5:0:0 */;
  %49 = nn.relu(%48) /* ty=Tensor[(1, 256, 14, 14), float32] span=aten::relu__12:0:0 */;
  %50 = nn.conv2d(%49, meta[relay.Constant][30] /* ty=Tensor[(512, 256, 3, 3), float32] */, strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %51 = add(%50, meta[relay.Constant][31] /* ty=Tensor[(512, 1, 1), float32] */) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %52 = nn.relu(%51) /* ty=Tensor[(1, 512, 7, 7), float32] span=aten::relu__13:0:0 */;
  %53 = nn.conv2d(%52, meta[relay.Constant][32] /* ty=Tensor[(512, 512, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %54 = nn.conv2d(%49, meta[relay.Constant][34] /* ty=Tensor[(512, 256, 1, 1), float32] */, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %55 = add(%53, meta[relay.Constant][33] /* ty=Tensor[(512, 1, 1), float32] */) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %56 = add(%54, meta[relay.Constant][35] /* ty=Tensor[(512, 1, 1), float32] */) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %57 = add(%55, %56) /* ty=Tensor[(1, 512, 7, 7), float32] span=aten::add__6:0:0 */;
  %58 = nn.relu(%57) /* ty=Tensor[(1, 512, 7, 7), float32] span=aten::relu__14:0:0 */;
  %59 = nn.conv2d(%58, meta[relay.Constant][36] /* ty=Tensor[(512, 512, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %60 = add(%59, meta[relay.Constant][37] /* ty=Tensor[(512, 1, 1), float32] */) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %61 = nn.relu(%60) /* ty=Tensor[(1, 512, 7, 7), float32] span=aten::relu__15:0:0 */;
  %62 = nn.conv2d(%61, meta[relay.Constant][38] /* ty=Tensor[(512, 512, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %63 = add(%62, meta[relay.Constant][39] /* ty=Tensor[(512, 1, 1), float32] */) /* ty=Tensor[(1, 512, 7, 7), float32] */;
  %64 = add(%63, %58) /* ty=Tensor[(1, 512, 7, 7), float32] span=aten::add__7:0:0 */;
  %65 = nn.relu(%64) /* ty=Tensor[(1, 512, 7, 7), float32] span=aten::relu__16:0:0 */;
  %66 = nn.adaptive_avg_pool2d(%65, output_size=[1, 1]) /* ty=Tensor[(1, 512, 1, 1), float32] span=aten::adaptive_avg_pool2d_0:0:0 */;
  %67 = reshape(%66, newshape=[0, -1, 1, 1]) /* ty=Tensor[(1, 512, 1, 1), float32] span=aten::flatten_0:0:0 */;
  %68 = squeeze(%67, axis=[2, 3]) /* ty=Tensor[(1, 512), float32] span=aten::flatten_0:0:0 */;
  %69 = nn.dense(%68, meta[relay.Constant][40] /* ty=Tensor[(1000, 512), float32] */, units=None) /* ty=Tensor[(1, 1000), float32] span=aten::linear_0:0:0 */;
  add(%69, meta[relay.Constant][41] /* ty=Tensor[(1000), float32] */) /* ty=Tensor[(1, 1000), float32] */
} /* ty=fn (Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] */