torch to ONNX: Unsqueeze

%cd ../../..
import set_env
from d2py.utils.file import mkdir
temp_dir = ".temp"
mkdir(temp_dir)
/media/pc/data/lxw/ai/tvm-book/doc/tutorials/frontend
import onnx
import tvm
from tvm import relay
path = "/media/pc/data/board/arria10/lxw/tasks/tools/npu_user_demos/models/telecom/vehile_det_traffic_yolov5n/yolov5n_384_640_.onnx"
onnx_model = onnx.load(path)
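
The model was exported from torch, where an ONNX Unsqueeze node typically comes from Tensor.unsqueeze (or torch.unsqueeze). A minimal sketch of such an export follows; the module and file name are hypothetical and are not the YOLOv5n model loaded above:

import torch

class AddAxis(torch.nn.Module):
    def forward(self, x):
        # Tensor.unsqueeze is exported as an ONNX Unsqueeze node
        return x.unsqueeze(1)

# Export a tiny graph containing a single Unsqueeze (hypothetical demo file)
torch.onnx.export(AddAxis(), torch.randn(1, 3), f"{temp_dir}/unsqueeze_demo.onnx",
                  input_names=["x"], output_names=["y"])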

# Model configuration
ENV = {
    "model_type": "onnx",
    "input_name": "images",
    "channel": 3,
    "height": 384,
    "width": 640,
    "mode": "RGB", # input image format
    "mean": (0,),
    "std": (1,)
}
shape = 1, ENV["channel"], ENV["height"], ENV["width"]  # NCHW
# freeze_params=True embeds the ONNX initializers as relay constants, so
# constant subgraphs (e.g. the Unsqueeze feeding the box decoding below)
# can be folded away at import time.
mod, params = relay.frontend.from_onnx(onnx_model, {ENV["input_name"]: shape}, freeze_params=True)
# with tvm.transform.PassContext(opt_level=3):
#     mod = relay.quantize.prerequisite_optimize(mod, params)
mod.show()
def @main(%images: Tensor[(1, 3, 384, 640), float32] /* ty=Tensor[(1, 3, 384, 640), float32] span=Conv_0.images:0:0 */) -> Tensor[(1, 84, 5040), float32] {
  %0 = nn.conv2d(%images, meta[relay.Constant][1] /* ty=Tensor[(16, 3, 6, 6), float32] span=Conv_0.model.0.conv.weight:0:0 */, strides=[2, 2], padding=[2, 2, 2, 2], channels=16, kernel_size=[6, 6]) /* ty=Tensor[(1, 16, 192, 320), float32] span=Conv_0:0:0 */;
  %1 = nn.bias_add(%0, meta[relay.Constant][2] /* ty=Tensor[(16), float32] span=Conv_0.model.0.conv.bias:0:0 */) /* ty=Tensor[(1, 16, 192, 320), float32] span=Conv_0:0:0 */;
  %2 = nn.relu(%1) /* ty=Tensor[(1, 16, 192, 320), float32] span=Relu_1:0:0 */;
  %3 = nn.conv2d(%2, meta[relay.Constant][3] /* ty=Tensor[(32, 16, 3, 3), float32] span=Conv_2.model.1.conv.weight:0:0 */, strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]) /* ty=Tensor[(1, 32, 96, 160), float32] span=Conv_2:0:0 */;
  %4 = nn.bias_add(%3, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 96, 160), float32] span=Conv_2:0:0 */;
  %5 = nn.relu(%4) /* ty=Tensor[(1, 32, 96, 160), float32] span=Relu_3:0:0 */;
  %6 = nn.conv2d(%5, meta[relay.Constant][5] /* ty=Tensor[(16, 32, 1, 1), float32] span=Conv_4.model.2.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_4:0:0 */;
  %7 = nn.bias_add(%6, meta[relay.Constant][2] /* ty=Tensor[(16), float32] span=Conv_0.model.0.conv.bias:0:0 */) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_4:0:0 */;
  %8 = nn.relu(%7) /* ty=Tensor[(1, 16, 96, 160), float32] span=Relu_5:0:0 */;
  %9 = nn.conv2d(%8, meta[relay.Constant][6] /* ty=Tensor[(16, 16, 1, 1), float32] span=Conv_6.model.2.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_6:0:0 */;
  %10 = nn.bias_add(%9, meta[relay.Constant][2] /* ty=Tensor[(16), float32] span=Conv_0.model.0.conv.bias:0:0 */) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_6:0:0 */;
  %11 = nn.relu(%10) /* ty=Tensor[(1, 16, 96, 160), float32] span=Relu_7:0:0 */;
  %12 = nn.conv2d(%11, meta[relay.Constant][7] /* ty=Tensor[(16, 16, 3, 3), float32] span=Conv_8.model.2.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=16, kernel_size=[3, 3]) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_8:0:0 */;
  %13 = nn.bias_add(%12, meta[relay.Constant][2] /* ty=Tensor[(16), float32] span=Conv_0.model.0.conv.bias:0:0 */) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_8:0:0 */;
  %14 = nn.relu(%13) /* ty=Tensor[(1, 16, 96, 160), float32] span=Relu_9:0:0 */;
  %15 = nn.conv2d(%5, meta[relay.Constant][8] /* ty=Tensor[(16, 32, 1, 1), float32] span=Conv_11.model.2.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_11:0:0 */;
  %16 = nn.bias_add(%15, meta[relay.Constant][2] /* ty=Tensor[(16), float32] span=Conv_0.model.0.conv.bias:0:0 */) /* ty=Tensor[(1, 16, 96, 160), float32] span=Conv_11:0:0 */;
  %17 = add(%8, %14) /* ty=Tensor[(1, 16, 96, 160), float32] span=Add_10:0:0 */;
  %18 = nn.relu(%16) /* ty=Tensor[(1, 16, 96, 160), float32] span=Relu_12:0:0 */;
  %19 = (%17, %18) /* ty=(Tensor[(1, 16, 96, 160), float32], Tensor[(1, 16, 96, 160), float32]) span=Concat_13:0:0 */;
  %20 = concatenate(%19, axis=1) /* ty=Tensor[(1, 32, 96, 160), float32] span=Concat_13:0:0 */;
  %21 = nn.conv2d(%20, meta[relay.Constant][9] /* ty=Tensor[(32, 32, 1, 1), float32] span=Conv_14.model.2.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 96, 160), float32] span=Conv_14:0:0 */;
  %22 = nn.bias_add(%21, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 96, 160), float32] span=Conv_14:0:0 */;
  %23 = nn.relu(%22) /* ty=Tensor[(1, 32, 96, 160), float32] span=Relu_15:0:0 */;
  %24 = nn.conv2d(%23, meta[relay.Constant][10] /* ty=Tensor[(64, 32, 3, 3), float32] span=Conv_16.model.3.conv.weight:0:0 */, strides=[2, 2], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_16:0:0 */;
  %25 = nn.bias_add(%24, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_16:0:0 */;
  %26 = nn.relu(%25) /* ty=Tensor[(1, 64, 48, 80), float32] span=Relu_17:0:0 */;
  %27 = nn.conv2d(%26, meta[relay.Constant][12] /* ty=Tensor[(32, 64, 1, 1), float32] span=Conv_18.model.4.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_18:0:0 */;
  %28 = nn.bias_add(%27, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_18:0:0 */;
  %29 = nn.relu(%28) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_19:0:0 */;
  %30 = nn.conv2d(%29, meta[relay.Constant][13] /* ty=Tensor[(32, 32, 1, 1), float32] span=Conv_20.model.4.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_20:0:0 */;
  %31 = nn.bias_add(%30, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_20:0:0 */;
  %32 = nn.relu(%31) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_21:0:0 */;
  %33 = nn.conv2d(%32, meta[relay.Constant][14] /* ty=Tensor[(32, 32, 3, 3), float32] span=Conv_22.model.4.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_22:0:0 */;
  %34 = nn.bias_add(%33, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_22:0:0 */;
  %35 = nn.relu(%34) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_23:0:0 */;
  %36 = add(%29, %35) /* ty=Tensor[(1, 32, 48, 80), float32] span=Add_24:0:0 */;
  %37 = nn.conv2d(%36, meta[relay.Constant][15] /* ty=Tensor[(32, 32, 1, 1), float32] span=Conv_25.model.4.m.1.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_25:0:0 */;
  %38 = nn.bias_add(%37, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_25:0:0 */;
  %39 = nn.relu(%38) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_26:0:0 */;
  %40 = nn.conv2d(%39, meta[relay.Constant][16] /* ty=Tensor[(32, 32, 3, 3), float32] span=Conv_27.model.4.m.1.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_27:0:0 */;
  %41 = nn.bias_add(%40, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_27:0:0 */;
  %42 = nn.relu(%41) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_28:0:0 */;
  %43 = nn.conv2d(%26, meta[relay.Constant][17] /* ty=Tensor[(32, 64, 1, 1), float32] span=Conv_30.model.4.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_30:0:0 */;
  %44 = nn.bias_add(%43, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_30:0:0 */;
  %45 = add(%36, %42) /* ty=Tensor[(1, 32, 48, 80), float32] span=Add_29:0:0 */;
  %46 = nn.relu(%44) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_31:0:0 */;
  %47 = (%45, %46) /* ty=(Tensor[(1, 32, 48, 80), float32], Tensor[(1, 32, 48, 80), float32]) span=Concat_32:0:0 */;
  %48 = concatenate(%47, axis=1) /* ty=Tensor[(1, 64, 48, 80), float32] span=Concat_32:0:0 */;
  %49 = nn.conv2d(%48, meta[relay.Constant][18] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_33.model.4.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_33:0:0 */;
  %50 = nn.bias_add(%49, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_33:0:0 */;
  %51 = nn.relu(%50) /* ty=Tensor[(1, 64, 48, 80), float32] span=Relu_34:0:0 */;
  %52 = nn.conv2d(%51, meta[relay.Constant][19] /* ty=Tensor[(128, 64, 3, 3), float32] span=Conv_35.model.5.conv.weight:0:0 */, strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_35:0:0 */;
  %53 = nn.bias_add(%52, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_35:0:0 */;
  %54 = nn.relu(%53) /* ty=Tensor[(1, 128, 24, 40), float32] span=Relu_36:0:0 */;
  %55 = nn.conv2d(%54, meta[relay.Constant][21] /* ty=Tensor[(64, 128, 1, 1), float32] span=Conv_37.model.6.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_37:0:0 */;
  %56 = nn.bias_add(%55, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_37:0:0 */;
  %57 = nn.relu(%56) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_38:0:0 */;
  %58 = nn.conv2d(%57, meta[relay.Constant][22] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_39.model.6.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_39:0:0 */;
  %59 = nn.bias_add(%58, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_39:0:0 */;
  %60 = nn.relu(%59) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_40:0:0 */;
  %61 = nn.conv2d(%60, meta[relay.Constant][23] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_41.model.6.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_41:0:0 */;
  %62 = nn.bias_add(%61, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_41:0:0 */;
  %63 = nn.relu(%62) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_42:0:0 */;
  %64 = add(%57, %63) /* ty=Tensor[(1, 64, 24, 40), float32] span=Add_43:0:0 */;
  %65 = nn.conv2d(%64, meta[relay.Constant][24] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_44.model.6.m.1.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_44:0:0 */;
  %66 = nn.bias_add(%65, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_44:0:0 */;
  %67 = nn.relu(%66) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_45:0:0 */;
  %68 = nn.conv2d(%67, meta[relay.Constant][25] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_46.model.6.m.1.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_46:0:0 */;
  %69 = nn.bias_add(%68, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_46:0:0 */;
  %70 = nn.relu(%69) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_47:0:0 */;
  %71 = add(%64, %70) /* ty=Tensor[(1, 64, 24, 40), float32] span=Add_48:0:0 */;
  %72 = nn.conv2d(%71, meta[relay.Constant][26] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_49.model.6.m.2.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_49:0:0 */;
  %73 = nn.bias_add(%72, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_49:0:0 */;
  %74 = nn.relu(%73) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_50:0:0 */;
  %75 = nn.conv2d(%74, meta[relay.Constant][27] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_51.model.6.m.2.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_51:0:0 */;
  %76 = nn.bias_add(%75, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_51:0:0 */;
  %77 = nn.relu(%76) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_52:0:0 */;
  %78 = nn.conv2d(%54, meta[relay.Constant][28] /* ty=Tensor[(64, 128, 1, 1), float32] span=Conv_54.model.6.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_54:0:0 */;
  %79 = nn.bias_add(%78, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_54:0:0 */;
  %80 = add(%71, %77) /* ty=Tensor[(1, 64, 24, 40), float32] span=Add_53:0:0 */;
  %81 = nn.relu(%79) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_55:0:0 */;
  %82 = (%80, %81) /* ty=(Tensor[(1, 64, 24, 40), float32], Tensor[(1, 64, 24, 40), float32]) span=Concat_56:0:0 */;
  %83 = concatenate(%82, axis=1) /* ty=Tensor[(1, 128, 24, 40), float32] span=Concat_56:0:0 */;
  %84 = nn.conv2d(%83, meta[relay.Constant][29] /* ty=Tensor[(128, 128, 1, 1), float32] span=Conv_57.model.6.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_57:0:0 */;
  %85 = nn.bias_add(%84, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_57:0:0 */;
  %86 = nn.relu(%85) /* ty=Tensor[(1, 128, 24, 40), float32] span=Relu_58:0:0 */;
  %87 = nn.conv2d(%86, meta[relay.Constant][30] /* ty=Tensor[(256, 128, 3, 3), float32] span=Conv_59.model.7.conv.weight:0:0 */, strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_59:0:0 */;
  %88 = nn.bias_add(%87, meta[relay.Constant][31] /* ty=Tensor[(256), float32] span=Conv_59.model.7.conv.bias:0:0 */) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_59:0:0 */;
  %89 = nn.relu(%88) /* ty=Tensor[(1, 256, 12, 20), float32] span=Relu_60:0:0 */;
  %90 = nn.conv2d(%89, meta[relay.Constant][32] /* ty=Tensor[(128, 256, 1, 1), float32] span=Conv_61.model.8.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_61:0:0 */;
  %91 = nn.bias_add(%90, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_61:0:0 */;
  %92 = nn.relu(%91) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_62:0:0 */;
  %93 = nn.conv2d(%92, meta[relay.Constant][33] /* ty=Tensor[(128, 128, 1, 1), float32] span=Conv_63.model.8.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_63:0:0 */;
  %94 = nn.bias_add(%93, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_63:0:0 */;
  %95 = nn.relu(%94) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_64:0:0 */;
  %96 = nn.conv2d(%95, meta[relay.Constant][34] /* ty=Tensor[(128, 128, 3, 3), float32] span=Conv_65.model.8.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_65:0:0 */;
  %97 = nn.bias_add(%96, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_65:0:0 */;
  %98 = nn.relu(%97) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_66:0:0 */;
  %99 = nn.conv2d(%89, meta[relay.Constant][35] /* ty=Tensor[(128, 256, 1, 1), float32] span=Conv_68.model.8.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_68:0:0 */;
  %100 = nn.bias_add(%99, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_68:0:0 */;
  %101 = add(%92, %98) /* ty=Tensor[(1, 128, 12, 20), float32] span=Add_67:0:0 */;
  %102 = nn.relu(%100) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_69:0:0 */;
  %103 = (%101, %102) /* ty=(Tensor[(1, 128, 12, 20), float32], Tensor[(1, 128, 12, 20), float32]) span=Concat_70:0:0 */;
  %104 = concatenate(%103, axis=1) /* ty=Tensor[(1, 256, 12, 20), float32] span=Concat_70:0:0 */;
  %105 = nn.conv2d(%104, meta[relay.Constant][36] /* ty=Tensor[(256, 256, 1, 1), float32] span=Conv_71.model.8.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_71:0:0 */;
  %106 = nn.bias_add(%105, meta[relay.Constant][31] /* ty=Tensor[(256), float32] span=Conv_59.model.7.conv.bias:0:0 */) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_71:0:0 */;
  %107 = nn.relu(%106) /* ty=Tensor[(1, 256, 12, 20), float32] span=Relu_72:0:0 */;
  %108 = nn.conv2d(%107, meta[relay.Constant][37] /* ty=Tensor[(128, 256, 1, 1), float32] span=Conv_73.model.9.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_73:0:0 */;
  %109 = nn.bias_add(%108, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_73:0:0 */;
  %110 = nn.relu(%109) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_74:0:0 */;
  %111 = nn.max_pool2d(%110, pool_size=[5, 5], padding=[2, 2, 2, 2]) /* ty=Tensor[(1, 128, 12, 20), float32] span=MaxPool_75:0:0 */;
  %112 = nn.max_pool2d(%111, pool_size=[5, 5], padding=[2, 2, 2, 2]) /* ty=Tensor[(1, 128, 12, 20), float32] span=MaxPool_76:0:0 */;
  %113 = nn.max_pool2d(%112, pool_size=[5, 5], padding=[2, 2, 2, 2]) /* ty=Tensor[(1, 128, 12, 20), float32] span=MaxPool_77:0:0 */;
  %114 = (%110, %111, %112, %113) /* ty=(Tensor[(1, 128, 12, 20), float32], Tensor[(1, 128, 12, 20), float32], Tensor[(1, 128, 12, 20), float32], Tensor[(1, 128, 12, 20), float32]) span=Concat_78:0:0 */;
  %115 = concatenate(%114, axis=1) /* ty=Tensor[(1, 512, 12, 20), float32] span=Concat_78:0:0 */;
  %116 = nn.conv2d(%115, meta[relay.Constant][38] /* ty=Tensor[(256, 512, 1, 1), float32] span=Conv_79.model.9.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_79:0:0 */;
  %117 = nn.bias_add(%116, meta[relay.Constant][31] /* ty=Tensor[(256), float32] span=Conv_59.model.7.conv.bias:0:0 */) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_79:0:0 */;
  %118 = nn.relu(%117) /* ty=Tensor[(1, 256, 12, 20), float32] span=Relu_80:0:0 */;
  %119 = nn.conv2d(%118, meta[relay.Constant][39] /* ty=Tensor[(128, 256, 1, 1), float32] span=Conv_81.model.10.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_81:0:0 */;
  %120 = nn.bias_add(%119, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_81:0:0 */;
  %121 = nn.relu(%120) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_82:0:0 */;
  %122 = image.resize2d(%121, size=[24, 40], roi=[0f, 0f, 0f, 0f], method="nearest_neighbor", coordinate_transformation_mode="asymmetric", rounding_method="floor", cubic_alpha=-0.75f) /* ty=Tensor[(1, 128, 24, 40), float32] span=Resize_84:0:0 */;
  %123 = (%122, %86) /* ty=(Tensor[(1, 128, 24, 40), float32], Tensor[(1, 128, 24, 40), float32]) span=Concat_85:0:0 */;
  %124 = concatenate(%123, axis=1) /* ty=Tensor[(1, 256, 24, 40), float32] span=Concat_85:0:0 */;
  %125 = nn.conv2d(%124, meta[relay.Constant][40] /* ty=Tensor[(64, 256, 1, 1), float32] span=Conv_86.model.13.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_86:0:0 */;
  %126 = nn.bias_add(%125, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_86:0:0 */;
  %127 = nn.relu(%126) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_87:0:0 */;
  %128 = nn.conv2d(%127, meta[relay.Constant][41] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_88.model.13.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_88:0:0 */;
  %129 = nn.bias_add(%128, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_88:0:0 */;
  %130 = nn.relu(%129) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_89:0:0 */;
  %131 = nn.conv2d(%130, meta[relay.Constant][42] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_90.model.13.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_90:0:0 */;
  %132 = nn.bias_add(%131, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_90:0:0 */;
  %133 = nn.conv2d(%124, meta[relay.Constant][43] /* ty=Tensor[(64, 256, 1, 1), float32] span=Conv_92.model.13.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_92:0:0 */;
  %134 = nn.bias_add(%133, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_92:0:0 */;
  %135 = nn.relu(%132) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_91:0:0 */;
  %136 = nn.relu(%134) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_93:0:0 */;
  %137 = (%135, %136) /* ty=(Tensor[(1, 64, 24, 40), float32], Tensor[(1, 64, 24, 40), float32]) span=Concat_94:0:0 */;
  %138 = concatenate(%137, axis=1) /* ty=Tensor[(1, 128, 24, 40), float32] span=Concat_94:0:0 */;
  %139 = nn.conv2d(%138, meta[relay.Constant][44] /* ty=Tensor[(128, 128, 1, 1), float32] span=Conv_95.model.13.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_95:0:0 */;
  %140 = nn.bias_add(%139, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_95:0:0 */;
  %141 = nn.relu(%140) /* ty=Tensor[(1, 128, 24, 40), float32] span=Relu_96:0:0 */;
  %142 = nn.conv2d(%141, meta[relay.Constant][45] /* ty=Tensor[(64, 128, 1, 1), float32] span=Conv_97.model.14.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_97:0:0 */;
  %143 = nn.bias_add(%142, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_97:0:0 */;
  %144 = nn.relu(%143) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_98:0:0 */;
  %145 = image.resize2d(%144, size=[48, 80], roi=[0f, 0f, 0f, 0f], method="nearest_neighbor", coordinate_transformation_mode="asymmetric", rounding_method="floor", cubic_alpha=-0.75f) /* ty=Tensor[(1, 64, 48, 80), float32] span=Resize_100:0:0 */;
  %146 = (%145, %51) /* ty=(Tensor[(1, 64, 48, 80), float32], Tensor[(1, 64, 48, 80), float32]) span=Concat_101:0:0 */;
  %147 = concatenate(%146, axis=1) /* ty=Tensor[(1, 128, 48, 80), float32] span=Concat_101:0:0 */;
  %148 = nn.conv2d(%147, meta[relay.Constant][46] /* ty=Tensor[(32, 128, 1, 1), float32] span=Conv_102.model.17.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_102:0:0 */;
  %149 = nn.bias_add(%148, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_102:0:0 */;
  %150 = nn.relu(%149) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_103:0:0 */;
  %151 = nn.conv2d(%150, meta[relay.Constant][47] /* ty=Tensor[(32, 32, 1, 1), float32] span=Conv_104.model.17.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_104:0:0 */;
  %152 = nn.bias_add(%151, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_104:0:0 */;
  %153 = nn.relu(%152) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_105:0:0 */;
  %154 = nn.conv2d(%153, meta[relay.Constant][48] /* ty=Tensor[(32, 32, 3, 3), float32] span=Conv_106.model.17.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_106:0:0 */;
  %155 = nn.bias_add(%154, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_106:0:0 */;
  %156 = nn.conv2d(%147, meta[relay.Constant][49] /* ty=Tensor[(32, 128, 1, 1), float32] span=Conv_108.model.17.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_108:0:0 */;
  %157 = nn.bias_add(%156, meta[relay.Constant][4] /* ty=Tensor[(32), float32] span=Conv_2.model.1.conv.bias:0:0 */) /* ty=Tensor[(1, 32, 48, 80), float32] span=Conv_108:0:0 */;
  %158 = nn.relu(%155) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_107:0:0 */;
  %159 = nn.relu(%157) /* ty=Tensor[(1, 32, 48, 80), float32] span=Relu_109:0:0 */;
  %160 = (%158, %159) /* ty=(Tensor[(1, 32, 48, 80), float32], Tensor[(1, 32, 48, 80), float32]) span=Concat_110:0:0 */;
  %161 = concatenate(%160, axis=1) /* ty=Tensor[(1, 64, 48, 80), float32] span=Concat_110:0:0 */;
  %162 = nn.conv2d(%161, meta[relay.Constant][50] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_111.model.17.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_111:0:0 */;
  %163 = nn.bias_add(%162, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_111:0:0 */;
  %164 = nn.relu(%163) /* ty=Tensor[(1, 64, 48, 80), float32] span=Relu_112:0:0 */;
  %165 = nn.conv2d(%164, meta[relay.Constant][51] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_144.model.24.cv2.0.0.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_144:0:0 */;
  %166 = nn.bias_add(%165, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_144:0:0 */;
  %167 = nn.relu(%166) /* ty=Tensor[(1, 64, 48, 80), float32] span=Relu_145:0:0 */;
  %168 = nn.conv2d(%167, meta[relay.Constant][52] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_146.model.24.cv2.0.1.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_146:0:0 */;
  %169 = nn.bias_add(%168, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_146:0:0 */;
  %170 = nn.relu(%169) /* ty=Tensor[(1, 64, 48, 80), float32] span=Relu_147:0:0 */;
  %171 = nn.conv2d(%170, meta[relay.Constant][53] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_148.model.24.cv2.0.2.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_148:0:0 */;
  %172 = nn.conv2d(%164, meta[relay.Constant][55] /* ty=Tensor[(80, 64, 3, 3), float32] span=Conv_149.model.24.cv3.0.0.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=80, kernel_size=[3, 3]) /* ty=Tensor[(1, 80, 48, 80), float32] span=Conv_149:0:0 */;
  %173 = nn.bias_add(%172, meta[relay.Constant][56] /* ty=Tensor[(80), float32] span=Conv_149.model.24.cv3.0.0.conv.bias:0:0 */) /* ty=Tensor[(1, 80, 48, 80), float32] span=Conv_149:0:0 */;
  %174 = nn.relu(%173) /* ty=Tensor[(1, 80, 48, 80), float32] span=Relu_150:0:0 */;
  %175 = nn.conv2d(%174, meta[relay.Constant][57] /* ty=Tensor[(80, 80, 3, 3), float32] span=Conv_151.model.24.cv3.0.1.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=80, kernel_size=[3, 3]) /* ty=Tensor[(1, 80, 48, 80), float32] span=Conv_151:0:0 */;
  %176 = nn.bias_add(%175, meta[relay.Constant][56] /* ty=Tensor[(80), float32] span=Conv_149.model.24.cv3.0.0.conv.bias:0:0 */) /* ty=Tensor[(1, 80, 48, 80), float32] span=Conv_151:0:0 */;
  %177 = nn.relu(%176) /* ty=Tensor[(1, 80, 48, 80), float32] span=Relu_152:0:0 */;
  %178 = nn.conv2d(%177, meta[relay.Constant][58] /* ty=Tensor[(80, 80, 1, 1), float32] span=Conv_153.model.24.cv3.0.2.weight:0:0 */, padding=[0, 0, 0, 0], channels=80, kernel_size=[1, 1]) /* ty=Tensor[(1, 80, 48, 80), float32] span=Conv_153:0:0 */;
  %179 = nn.bias_add(%171, meta[relay.Constant][54] /* ty=Tensor[(64), float32] span=Conv_148.model.24.cv2.0.2.bias:0:0 */) /* ty=Tensor[(1, 64, 48, 80), float32] span=Conv_148:0:0 */;
  %180 = nn.bias_add(%178, meta[relay.Constant][59] /* ty=Tensor[(80), float32] span=Conv_153.model.24.cv3.0.2.bias:0:0 */) /* ty=Tensor[(1, 80, 48, 80), float32] span=Conv_153:0:0 */;
  %181 = (%179, %180) /* ty=(Tensor[(1, 64, 48, 80), float32], Tensor[(1, 80, 48, 80), float32]) span=Concat_154:0:0 */;
  %182 = concatenate(%181, axis=1) /* ty=Tensor[(1, 144, 48, 80), float32] span=Concat_154:0:0 */;
  %183 = nn.conv2d(%164, meta[relay.Constant][60] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_113.model.18.conv.weight:0:0 */, strides=[2, 2], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_113:0:0 */;
  %184 = nn.bias_add(%183, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_113:0:0 */;
  %185 = nn.relu(%184) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_114:0:0 */;
  %186 = (%185, %144) /* ty=(Tensor[(1, 64, 24, 40), float32], Tensor[(1, 64, 24, 40), float32]) span=Concat_115:0:0 */;
  %187 = concatenate(%186, axis=1) /* ty=Tensor[(1, 128, 24, 40), float32] span=Concat_115:0:0 */;
  %188 = nn.conv2d(%187, meta[relay.Constant][61] /* ty=Tensor[(64, 128, 1, 1), float32] span=Conv_116.model.20.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_116:0:0 */;
  %189 = nn.bias_add(%188, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_116:0:0 */;
  %190 = nn.relu(%189) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_117:0:0 */;
  %191 = nn.conv2d(%190, meta[relay.Constant][62] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_118.model.20.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_118:0:0 */;
  %192 = nn.bias_add(%191, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_118:0:0 */;
  %193 = nn.relu(%192) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_119:0:0 */;
  %194 = nn.conv2d(%193, meta[relay.Constant][63] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_120.model.20.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_120:0:0 */;
  %195 = nn.bias_add(%194, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_120:0:0 */;
  %196 = nn.conv2d(%187, meta[relay.Constant][64] /* ty=Tensor[(64, 128, 1, 1), float32] span=Conv_122.model.20.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_122:0:0 */;
  %197 = nn.bias_add(%196, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_122:0:0 */;
  %198 = nn.relu(%195) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_121:0:0 */;
  %199 = nn.relu(%197) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_123:0:0 */;
  %200 = (%198, %199) /* ty=(Tensor[(1, 64, 24, 40), float32], Tensor[(1, 64, 24, 40), float32]) span=Concat_124:0:0 */;
  %201 = concatenate(%200, axis=1) /* ty=Tensor[(1, 128, 24, 40), float32] span=Concat_124:0:0 */;
  %202 = nn.conv2d(%201, meta[relay.Constant][65] /* ty=Tensor[(128, 128, 1, 1), float32] span=Conv_125.model.20.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_125:0:0 */;
  %203 = nn.bias_add(%202, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 24, 40), float32] span=Conv_125:0:0 */;
  %204 = nn.relu(%203) /* ty=Tensor[(1, 128, 24, 40), float32] span=Relu_126:0:0 */;
  %205 = nn.conv2d(%204, meta[relay.Constant][66] /* ty=Tensor[(64, 128, 3, 3), float32] span=Conv_155.model.24.cv2.1.0.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_155:0:0 */;
  %206 = nn.bias_add(%205, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_155:0:0 */;
  %207 = nn.relu(%206) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_156:0:0 */;
  %208 = nn.conv2d(%207, meta[relay.Constant][67] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_157.model.24.cv2.1.1.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_157:0:0 */;
  %209 = nn.bias_add(%208, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_157:0:0 */;
  %210 = nn.relu(%209) /* ty=Tensor[(1, 64, 24, 40), float32] span=Relu_158:0:0 */;
  %211 = nn.conv2d(%210, meta[relay.Constant][68] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_159.model.24.cv2.1.2.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_159:0:0 */;
  %212 = nn.conv2d(%204, meta[relay.Constant][69] /* ty=Tensor[(80, 128, 3, 3), float32] span=Conv_160.model.24.cv3.1.0.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=80, kernel_size=[3, 3]) /* ty=Tensor[(1, 80, 24, 40), float32] span=Conv_160:0:0 */;
  %213 = nn.bias_add(%212, meta[relay.Constant][56] /* ty=Tensor[(80), float32] span=Conv_149.model.24.cv3.0.0.conv.bias:0:0 */) /* ty=Tensor[(1, 80, 24, 40), float32] span=Conv_160:0:0 */;
  %214 = nn.relu(%213) /* ty=Tensor[(1, 80, 24, 40), float32] span=Relu_161:0:0 */;
  %215 = nn.conv2d(%214, meta[relay.Constant][70] /* ty=Tensor[(80, 80, 3, 3), float32] span=Conv_162.model.24.cv3.1.1.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=80, kernel_size=[3, 3]) /* ty=Tensor[(1, 80, 24, 40), float32] span=Conv_162:0:0 */;
  %216 = nn.bias_add(%215, meta[relay.Constant][56] /* ty=Tensor[(80), float32] span=Conv_149.model.24.cv3.0.0.conv.bias:0:0 */) /* ty=Tensor[(1, 80, 24, 40), float32] span=Conv_162:0:0 */;
  %217 = nn.relu(%216) /* ty=Tensor[(1, 80, 24, 40), float32] span=Relu_163:0:0 */;
  %218 = nn.conv2d(%217, meta[relay.Constant][71] /* ty=Tensor[(80, 80, 1, 1), float32] span=Conv_164.model.24.cv3.1.2.weight:0:0 */, padding=[0, 0, 0, 0], channels=80, kernel_size=[1, 1]) /* ty=Tensor[(1, 80, 24, 40), float32] span=Conv_164:0:0 */;
  %219 = nn.bias_add(%211, meta[relay.Constant][54] /* ty=Tensor[(64), float32] span=Conv_148.model.24.cv2.0.2.bias:0:0 */) /* ty=Tensor[(1, 64, 24, 40), float32] span=Conv_159:0:0 */;
  %220 = nn.bias_add(%218, meta[relay.Constant][72] /* ty=Tensor[(80), float32] span=Conv_164.model.24.cv3.1.2.bias:0:0 */) /* ty=Tensor[(1, 80, 24, 40), float32] span=Conv_164:0:0 */;
  %221 = (%219, %220) /* ty=(Tensor[(1, 64, 24, 40), float32], Tensor[(1, 80, 24, 40), float32]) span=Concat_165:0:0 */;
  %222 = concatenate(%221, axis=1) /* ty=Tensor[(1, 144, 24, 40), float32] span=Concat_165:0:0 */;
  %223 = nn.conv2d(%204, meta[relay.Constant][73] /* ty=Tensor[(128, 128, 3, 3), float32] span=Conv_127.model.21.conv.weight:0:0 */, strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_127:0:0 */;
  %224 = nn.bias_add(%223, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_127:0:0 */;
  %225 = nn.relu(%224) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_128:0:0 */;
  %226 = (%225, %121) /* ty=(Tensor[(1, 128, 12, 20), float32], Tensor[(1, 128, 12, 20), float32]) span=Concat_129:0:0 */;
  %227 = concatenate(%226, axis=1) /* ty=Tensor[(1, 256, 12, 20), float32] span=Concat_129:0:0 */;
  %228 = nn.conv2d(%227, meta[relay.Constant][74] /* ty=Tensor[(128, 256, 1, 1), float32] span=Conv_130.model.23.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_130:0:0 */;
  %229 = nn.bias_add(%228, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_130:0:0 */;
  %230 = nn.relu(%229) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_131:0:0 */;
  %231 = nn.conv2d(%230, meta[relay.Constant][75] /* ty=Tensor[(128, 128, 1, 1), float32] span=Conv_132.model.23.m.0.cv1.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_132:0:0 */;
  %232 = nn.bias_add(%231, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_132:0:0 */;
  %233 = nn.relu(%232) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_133:0:0 */;
  %234 = nn.conv2d(%233, meta[relay.Constant][76] /* ty=Tensor[(128, 128, 3, 3), float32] span=Conv_134.model.23.m.0.cv2.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_134:0:0 */;
  %235 = nn.bias_add(%234, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_134:0:0 */;
  %236 = nn.conv2d(%227, meta[relay.Constant][77] /* ty=Tensor[(128, 256, 1, 1), float32] span=Conv_136.model.23.cv2.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_136:0:0 */;
  %237 = nn.bias_add(%236, meta[relay.Constant][20] /* ty=Tensor[(128), float32] span=Conv_35.model.5.conv.bias:0:0 */) /* ty=Tensor[(1, 128, 12, 20), float32] span=Conv_136:0:0 */;
  %238 = nn.relu(%235) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_135:0:0 */;
  %239 = nn.relu(%237) /* ty=Tensor[(1, 128, 12, 20), float32] span=Relu_137:0:0 */;
  %240 = (%238, %239) /* ty=(Tensor[(1, 128, 12, 20), float32], Tensor[(1, 128, 12, 20), float32]) span=Concat_138:0:0 */;
  %241 = concatenate(%240, axis=1) /* ty=Tensor[(1, 256, 12, 20), float32] span=Concat_138:0:0 */;
  %242 = nn.conv2d(%241, meta[relay.Constant][78] /* ty=Tensor[(256, 256, 1, 1), float32] span=Conv_139.model.23.cv3.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_139:0:0 */;
  %243 = nn.bias_add(%242, meta[relay.Constant][31] /* ty=Tensor[(256), float32] span=Conv_59.model.7.conv.bias:0:0 */) /* ty=Tensor[(1, 256, 12, 20), float32] span=Conv_139:0:0 */;
  %244 = nn.relu(%243) /* ty=Tensor[(1, 256, 12, 20), float32] span=Relu_140:0:0 */;
  %245 = nn.conv2d(%244, meta[relay.Constant][79] /* ty=Tensor[(64, 256, 3, 3), float32] span=Conv_166.model.24.cv2.2.0.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 12, 20), float32] span=Conv_166:0:0 */;
  %246 = nn.bias_add(%245, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 12, 20), float32] span=Conv_166:0:0 */;
  %247 = nn.relu(%246) /* ty=Tensor[(1, 64, 12, 20), float32] span=Relu_167:0:0 */;
  %248 = nn.conv2d(%247, meta[relay.Constant][80] /* ty=Tensor[(64, 64, 3, 3), float32] span=Conv_168.model.24.cv2.2.1.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 12, 20), float32] span=Conv_168:0:0 */;
  %249 = nn.bias_add(%248, meta[relay.Constant][11] /* ty=Tensor[(64), float32] span=Conv_16.model.3.conv.bias:0:0 */) /* ty=Tensor[(1, 64, 12, 20), float32] span=Conv_168:0:0 */;
  %250 = nn.relu(%249) /* ty=Tensor[(1, 64, 12, 20), float32] span=Relu_169:0:0 */;
  %251 = nn.conv2d(%250, meta[relay.Constant][81] /* ty=Tensor[(64, 64, 1, 1), float32] span=Conv_170.model.24.cv2.2.2.weight:0:0 */, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 12, 20), float32] span=Conv_170:0:0 */;
  %252 = nn.conv2d(%244, meta[relay.Constant][82] /* ty=Tensor[(80, 256, 3, 3), float32] span=Conv_171.model.24.cv3.2.0.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=80, kernel_size=[3, 3]) /* ty=Tensor[(1, 80, 12, 20), float32] span=Conv_171:0:0 */;
  %253 = nn.bias_add(%252, meta[relay.Constant][56] /* ty=Tensor[(80), float32] span=Conv_149.model.24.cv3.0.0.conv.bias:0:0 */) /* ty=Tensor[(1, 80, 12, 20), float32] span=Conv_171:0:0 */;
  %254 = nn.relu(%253) /* ty=Tensor[(1, 80, 12, 20), float32] span=Relu_172:0:0 */;
  %255 = nn.conv2d(%254, meta[relay.Constant][83] /* ty=Tensor[(80, 80, 3, 3), float32] span=Conv_173.model.24.cv3.2.1.conv.weight:0:0 */, padding=[1, 1, 1, 1], channels=80, kernel_size=[3, 3]) /* ty=Tensor[(1, 80, 12, 20), float32] span=Conv_173:0:0 */;
  %256 = nn.bias_add(%255, meta[relay.Constant][56] /* ty=Tensor[(80), float32] span=Conv_149.model.24.cv3.0.0.conv.bias:0:0 */) /* ty=Tensor[(1, 80, 12, 20), float32] span=Conv_173:0:0 */;
  %257 = nn.relu(%256) /* ty=Tensor[(1, 80, 12, 20), float32] span=Relu_174:0:0 */;
  %258 = nn.conv2d(%257, meta[relay.Constant][84] /* ty=Tensor[(80, 80, 1, 1), float32] span=Conv_175.model.24.cv3.2.2.weight:0:0 */, padding=[0, 0, 0, 0], channels=80, kernel_size=[1, 1]) /* ty=Tensor[(1, 80, 12, 20), float32] span=Conv_175:0:0 */;
  %259 = nn.bias_add(%251, meta[relay.Constant][54] /* ty=Tensor[(64), float32] span=Conv_148.model.24.cv2.0.2.bias:0:0 */) /* ty=Tensor[(1, 64, 12, 20), float32] span=Conv_170:0:0 */;
  %260 = nn.bias_add(%258, meta[relay.Constant][85] /* ty=Tensor[(80), float32] span=Conv_175.model.24.cv3.2.2.bias:0:0 */) /* ty=Tensor[(1, 80, 12, 20), float32] span=Conv_175:0:0 */;
  %261 = (%259, %260) /* ty=(Tensor[(1, 64, 12, 20), float32], Tensor[(1, 80, 12, 20), float32]) span=Concat_176:0:0 */;
  %262 = concatenate(%261, axis=1) /* ty=Tensor[(1, 144, 12, 20), float32] span=Concat_176:0:0 */;
  %263 = reshape(%182, newshape=[1, 144, -1]) /* ty=Tensor[(1, 144, 3840), float32] span=Reshape_309:0:0 */;
  %264 = reshape(%222, newshape=[1, 144, -1]) /* ty=Tensor[(1, 144, 960), float32] span=Reshape_312:0:0 */;
  %265 = reshape(%262, newshape=[1, 144, -1]) /* ty=Tensor[(1, 144, 240), float32] span=Reshape_315:0:0 */;
  %266 = (%263, %264, %265) /* ty=(Tensor[(1, 144, 3840), float32], Tensor[(1, 144, 960), float32], Tensor[(1, 144, 240), float32]) span=Concat_316:0:0 */;
  %267 = concatenate(%266, axis=2) /* ty=Tensor[(1, 144, 5040), float32] span=Concat_316:0:0 */;
  %268 = split(%267, indices_or_sections=[64], axis=1) /* ty=(Tensor[(1, 64, 5040), float32], Tensor[(1, 80, 5040), float32]) span=Split_317:0:0 */;
  %269 = %268.0 /* ty=Tensor[(1, 64, 5040), float32] span=Split_317:0:0 */;
  %270 = reshape(%269, newshape=[1, 4, 16, 5040]) /* ty=Tensor[(1, 4, 16, 5040), float32] span=Reshape_327:0:0 */;
  %271 = transpose(%270, axes=[0, 3, 1, 2]) /* ty=Tensor[(1, 5040, 4, 16), float32] span=Transpose_328:0:0 */;
  %272 = nn.softmax(%271, axis=3) /* ty=Tensor[(1, 5040, 4, 16), float32] span=Softmax_329:0:0 */;
  %273 = transpose(%272, axes=[0, 3, 2, 1]) /* ty=Tensor[(1, 16, 4, 5040), float32] span=Transpose_330:0:0 */;
  %274 = nn.conv2d(%273, meta[relay.Constant][86] /* ty=Tensor[(1, 16, 1, 1), float32] span=Conv_331.model.24.dfl.conv.weight:0:0 */, padding=[0, 0, 0, 0], channels=1, kernel_size=[1, 1]) /* ty=Tensor[(1, 1, 4, 5040), float32] span=Conv_331:0:0 */;
  %275 = reshape(%274, newshape=[1, 4, 5040]) /* ty=Tensor[(1, 4, 5040), float32] span=Reshape_335:0:0 */;
  %276 = strided_slice(%275, begin=[0i64], end=[2i64], strides=[1i64], axes=[1i64]) /* ty=Tensor[(1, 2, 5040), float32] span=Slice_347:0:0 */;
  %277 = strided_slice(%275, begin=[2i64], end=[4i64], strides=[1i64], axes=[1i64]) /* ty=Tensor[(1, 2, 5040), float32] span=Slice_350:0:0 */;
  %278 = subtract(meta[relay.Constant][0] /* ty=Tensor[(1, 2, 5040), float32] span=Unsqueeze_336:0:0 */, %276) /* ty=Tensor[(1, 2, 5040), float32] span=Sub_351:0:0 */;
  %279 = add(meta[relay.Constant][0] /* ty=Tensor[(1, 2, 5040), float32] span=Unsqueeze_336:0:0 */, %277) /* ty=Tensor[(1, 2, 5040), float32] span=Add_352:0:0 */;
  %280 = add(%278, %279) /* ty=Tensor[(1, 2, 5040), float32] span=Add_353:0:0 */;
  %281 = divide(%280, 2f /* ty=float32 span=Div_354.559:0:0 */) /* ty=Tensor[(1, 2, 5040), float32] span=Div_354:0:0 */;
  %282 = subtract(%279, %278) /* ty=Tensor[(1, 2, 5040), float32] span=Sub_355:0:0 */;
  %283 = (%281, %282) /* ty=(Tensor[(1, 2, 5040), float32], Tensor[(1, 2, 5040), float32]) span=Concat_356:0:0 */;
  %284 = concatenate(%283, axis=1) /* ty=Tensor[(1, 4, 5040), float32] span=Concat_356:0:0 */;
  %285 = %268.1 /* ty=Tensor[(1, 80, 5040), float32] span=Split_317:0:0 */;
  %286 = multiply(%284, meta[relay.Constant][87] /* ty=Tensor[(1, 5040), float32] span=Transpose_306:0:0 */) /* ty=Tensor[(1, 4, 5040), float32] span=Mul_357:0:0 */;
  %287 = sigmoid(%285) /* ty=Tensor[(1, 80, 5040), float32] span=Sigmoid_358:0:0 */;
  %288 = (%286, %287) /* ty=(Tensor[(1, 4, 5040), float32], Tensor[(1, 80, 5040), float32]) span=Concat_359:0:0 */;
  concatenate(%288, axis=1) /* ty=Tensor[(1, 84, 5040), float32] span=Concat_359:0:0 */
}
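
Note the span=Unsqueeze_336 annotation on meta[relay.Constant][0] in %278/%279 above: with the parameters frozen, the tensor produced by the ONNX Unsqueeze node is constant, so it was evaluated at import time and baked into the module; no explicit Relay op remains for it. When the input of Unsqueeze is not constant, the frontend lowers it to expand_dims instead. A minimal sketch of that case, using a hypothetical single-node ONNX graph that is not part of the model above:

from onnx import helper, TensorProto

# Hypothetical one-node graph: y = Unsqueeze(x, axes=[1]); at opset 11
# the axes are still an attribute rather than an input.
node = helper.make_node("Unsqueeze", inputs=["x"], outputs=["y"], axes=[1])
graph = helper.make_graph(
    [node], "unsqueeze_demo",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1, 3])],
)
demo_model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 11)])
demo_mod, _ = relay.frontend.from_onnx(demo_model, {"x": (1, 3)})
demo_mod.show()  # x is a free variable, so the Unsqueeze shows up as expand_dims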
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        skip_conv_layers=[],
        # calibrate_mode="kl_divergence", 
        weight_scale="max",
        # round_for_shift=True,
        # rounding="TONEAREST", # "UPWARD" or "TONEAREST"
        # calibrate_skip_layers=[],
        skip_dense_layer=False,
    ):
        qmod = relay.quantize.quantize(mod, params)
qmod.show()
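
To sanity-check the quantized module, it can be compiled and run on random input and its output shape compared with the float module. A minimal sketch, assuming an llvm target and CPU execution (neither is specified in this tutorial):

import numpy as np
from tvm.contrib.graph_executor import GraphModule

# Build the quantized module (parameters are already bound after quantize)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(qmod, target="llvm")

dev = tvm.cpu()
runtime = GraphModule(lib["default"](dev))
runtime.set_input(ENV["input_name"], np.random.rand(*shape).astype("float32"))
runtime.run()
print(runtime.get_output(0).shape)  # expected: (1, 84, 5040), as in the float module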