# ONNX Relax 测试 (ONNX Relax test)
# Load the NanoTrack head model from ONNX and import it into TVM Relax.
# Imports come first (PEP 8); `relax` is also used by the fold pipeline below.
import onnx
import tvm
from tvm import relax
from tvm.relax.frontend.onnx import from_onnx

# Board-local absolute path to the exported tracker-head ONNX model.
path = "/media/pc/data/board/arria10/lxw/tasks/tools/npuusertools/models/test/nanotrack_head/nanotrack_head.onnx"

# Parse the ONNX protobuf and translate the graph into a Relax IRModule.
model = onnx.load(path)
mod = from_onnx(model)
# Print the imported module as TVMScript (its output is reproduced below).
mod.show()
# from tvm.script import ir as I
# from tvm.script import tir as T
# from tvm.script import relax as R
# NOTE(review): this block is the TVMScript text printed by `mod.show()` above,
# pasted into the document. The export stripped all indentation, so it is NOT
# runnable as-is; it documents the NanoTrack-head graph imported from ONNX.
# The graph takes two feature maps (input1: 1x96x8x8, input2: 1x96x16x16) and
# produces a (1,2,15,15) classification map and a (1,4,15,15) regression map.
@I.ir_module
class Module:
# Elementwise max(A, scalar B) over a (1, 64, 15, 15) tensor — the lower-bound
# half of a clip; paired with `minimum` below it realizes ReLU6 (clip to [0, 6]).
@T.prim_func(private=True)
def maximum(A: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(64), T.int64(15), T.int64(15)):
with T.block("T_maximum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_maximum[v_ax0, v_ax1, v_ax2, v_ax3])
T_maximum[v_ax0, v_ax1, v_ax2, v_ax3] = T.max(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
# Same elementwise max, specialized for the 96-channel (1, 96, 15, 15) shape.
@T.prim_func(private=True)
def maximum1(A: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(96), T.int64(15), T.int64(15)):
with T.block("T_maximum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_maximum[v_ax0, v_ax1, v_ax2, v_ax3])
T_maximum[v_ax0, v_ax1, v_ax2, v_ax3] = T.max(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
# Elementwise min(lv24, scalar B) over (1, 64, 15, 15) — the upper-bound half of
# the ReLU6 clip (called with B = 6.0 in `main`).
@T.prim_func(private=True)
def minimum(lv24: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(64), T.int64(15), T.int64(15)):
with T.block("T_minimum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(lv24[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_minimum[v_ax0, v_ax1, v_ax2, v_ax3])
T_minimum[v_ax0, v_ax1, v_ax2, v_ax3] = T.min(lv24[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
# Same elementwise min, specialized for the 96-channel (1, 96, 15, 15) shape.
@T.prim_func(private=True)
def minimum1(lv84: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(96), T.int64(15), T.int64(15)):
with T.block("T_minimum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(lv84[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_minimum[v_ax0, v_ax1, v_ax2, v_ax3])
T_minimum[v_ax0, v_ax1, v_ax2, v_ax3] = T.min(lv84[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
# Main dataflow graph: convs/bias-adds with constant weights (metadata[...]),
# ReLU6 realized as call_tir(maximum*)+call_tir(minimum*), and a final tuple of
# (classification logits, exp-transformed regression offsets).
@R.function
def main(input1: R.Tensor((1, 96, 8, 8), dtype="float32"), input2: R.Tensor((1, 96, 16, 16), dtype="float32")) -> R.Tuple(R.Tensor((1, 2, 15, 15), dtype="float32"), R.Tensor((1, 4, 15, 15), dtype="float32")):
R.func_attr({"num_input": 2})
cls = Module
with R.dataflow():
lv: R.Tensor((1, 96, 4, 8), dtype="float32") = R.strided_slice(input1, (R.prim_value(2),), (R.prim_value(2),), (R.prim_value(6),), (R.prim_value(1),), assume_inbound=False)
lv1: R.Tensor((1, 96, 8, 8), dtype="float32") = R.nn.conv2d(input1, metadata["relax.expr.Constant"][0], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv2: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][1], R.shape([1, 96, 1, 1]))
lv3: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][2], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv4: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][3], R.shape([1, 96, 1, 1]))
lv5: R.Tensor((1, 96, 8, 8), dtype="float32") = R.add(lv1, lv2)
lv6: R.Tensor((1, 96, 64), dtype="float32") = R.reshape(lv5, R.shape([1, 96, 64]))
lv7: R.Tensor((1, 64, 96), dtype="float32") = R.permute_dims(lv6, axes=[0, 2, 1])
lv8: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv3, lv4)
# The template branch's features are reshaped into a (64, 96, 1, 1) kernel and
# correlated with the search branch via a 1x1 conv (lv10).
lv9: R.Tensor((64, 96, 1, 1), dtype="float32") = R.reshape(lv7, R.shape([64, 96, 1, 1]))
lv10: R.Tensor((1, 64, 16, 16), dtype="float32") = R.nn.conv2d(lv8, lv9, strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
# lv11..lv20: global-average-pool -> 1x1 conv -> relu -> 1x1 conv -> sigmoid ->
# channel-wise rescale of lv10 (a squeeze-and-excitation-style gate).
lv11: R.Tensor((1, 64, 1, 1), dtype="float32") = R.mean(lv10, axis=[2, 3], keepdims=True)
lv12: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv11, metadata["relax.expr.Constant"][4], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv13: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][5], R.shape([1, 64, 1, 1]))
lv14: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv12, lv13)
lv15: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.relu(lv14)
lv16: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv15, metadata["relax.expr.Constant"][6], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv17: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][7], R.shape([1, 64, 1, 1]))
lv18: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv16, lv17)
lv19: R.Tensor((1, 64, 1, 1), dtype="float32") = R.sigmoid(lv18)
lv20: R.Tensor((1, 64, 16, 16), dtype="float32") = R.multiply(lv10, lv19)
lv21: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv20, metadata["relax.expr.Constant"][8], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=64, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv22: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][9], R.shape([1, 64, 1, 1]))
lv23: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv21, lv22)
# ReLU6: clamp to [0, 6] via the TIR maximum/minimum helpers.
lv24 = R.call_tir(cls.maximum, (lv23, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv25 = R.call_tir(cls.minimum, (lv24, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv26: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv25, metadata["relax.expr.Constant"][10], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv27: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][11], R.shape([1, 64, 1, 1]))
# lv28..lv54: a second, structurally identical correlation + gate branch with
# its own constants (the regression-head counterpart of lv1..lv27).
lv28: R.Tensor((1, 96, 8, 8), dtype="float32") = R.nn.conv2d(input1, metadata["relax.expr.Constant"][12], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv29: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][13], R.shape([1, 96, 1, 1]))
lv30: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][14], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv31: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][15], R.shape([1, 96, 1, 1]))
lv32: R.Tensor((1, 96, 8, 8), dtype="float32") = R.add(lv28, lv29)
lv33: R.Tensor((1, 96, 64), dtype="float32") = R.reshape(lv32, R.shape([1, 96, 64]))
lv34: R.Tensor((1, 64, 96), dtype="float32") = R.permute_dims(lv33, axes=[0, 2, 1])
lv35: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv30, lv31)
lv36: R.Tensor((64, 96, 1, 1), dtype="float32") = R.reshape(lv34, R.shape([64, 96, 1, 1]))
lv37: R.Tensor((1, 64, 16, 16), dtype="float32") = R.nn.conv2d(lv35, lv36, strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv38: R.Tensor((1, 64, 1, 1), dtype="float32") = R.mean(lv37, axis=[2, 3], keepdims=True)
lv39: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv38, metadata["relax.expr.Constant"][16], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv40: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][17], R.shape([1, 64, 1, 1]))
lv41: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv39, lv40)
lv42: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.relu(lv41)
lv43: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv42, metadata["relax.expr.Constant"][18], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv44: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][19], R.shape([1, 64, 1, 1]))
lv45: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv43, lv44)
lv46: R.Tensor((1, 64, 1, 1), dtype="float32") = R.sigmoid(lv45)
lv47: R.Tensor((1, 64, 16, 16), dtype="float32") = R.multiply(lv37, lv46)
lv48: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv47, metadata["relax.expr.Constant"][20], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=64, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv49: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][21], R.shape([1, 64, 1, 1]))
lv50: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv48, lv49)
lv51 = R.call_tir(cls.maximum, (lv50, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv52 = R.call_tir(cls.minimum, (lv51, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv53: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv52, metadata["relax.expr.Constant"][22], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv54: R.Tensor((1, 64, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][23], R.shape([1, 64, 1, 1]))
# lv55..lv69: a 4x4 center crop of the sliced template is turned into two
# depthwise (96, 1, 4, 4) correlation kernels (lv62, lv69).
lv55: R.Tensor((1, 96, 4, 4), dtype="float32") = R.strided_slice(lv, (R.prim_value(3),), (R.prim_value(2),), (R.prim_value(6),), (R.prim_value(1),), assume_inbound=False)
lv56: R.Tensor((1, 96, 4, 4), dtype="float32") = R.nn.conv2d(lv55, metadata["relax.expr.Constant"][24], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv57: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][25], R.shape([1, 96, 1, 1]))
lv58: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][26], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv59: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][27], R.shape([1, 96, 1, 1]))
lv60: R.Tensor((1, 96, 4, 4), dtype="float32") = R.add(lv56, lv57)
lv61: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv58, lv59)
lv62: R.Tensor((96, 1, 4, 4), dtype="float32") = R.reshape(lv60, R.shape([96, 1, 4, 4]))
lv63: R.Tensor((1, 96, 4, 4), dtype="float32") = R.nn.conv2d(lv55, metadata["relax.expr.Constant"][28], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv64: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][29], R.shape([1, 96, 1, 1]))
lv65: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][30], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv66: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][31], R.shape([1, 96, 1, 1]))
lv67: R.Tensor((1, 96, 4, 4), dtype="float32") = R.add(lv63, lv64)
lv68: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv65, lv66)
lv69: R.Tensor((96, 1, 4, 4), dtype="float32") = R.reshape(lv67, R.shape([96, 1, 4, 4]))
lv70: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv26, lv27)
lv71: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv61, lv62, strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv72: R.Tensor((1, 160, 15, 15), dtype="float32") = R.concat((lv70, lv71), axis=1)
lv73: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv72, metadata["relax.expr.Constant"][32], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv74: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][33], R.shape([1, 96, 1, 1]))
lv75: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv53, lv54)
lv76: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv68, lv69, strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv77: R.Tensor((1, 160, 15, 15), dtype="float32") = R.concat((lv75, lv76), axis=1)
lv78: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv77, metadata["relax.expr.Constant"][34], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv79: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][35], R.shape([1, 96, 1, 1]))
# lv80..lv128: a tower of repeated (depthwise 3x3 conv + bias + ReLU6 +
# pointwise 1x1 conv + bias) stages feeding the classification output.
lv80: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv78, lv79)
lv81: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv80, metadata["relax.expr.Constant"][36], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv82: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][37], R.shape([1, 96, 1, 1]))
lv83: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv81, lv82)
lv84 = R.call_tir(cls.maximum1, (lv83, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv85 = R.call_tir(cls.minimum1, (lv84, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv86: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv85, metadata["relax.expr.Constant"][38], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv87: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][39], R.shape([1, 96, 1, 1]))
lv88: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv86, lv87)
lv89: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv88, metadata["relax.expr.Constant"][40], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv90: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][41], R.shape([1, 96, 1, 1]))
lv91: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv89, lv90)
lv92 = R.call_tir(cls.maximum1, (lv91, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv93 = R.call_tir(cls.minimum1, (lv92, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv94: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv93, metadata["relax.expr.Constant"][42], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv95: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][43], R.shape([1, 96, 1, 1]))
lv96: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv94, lv95)
lv97: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv96, metadata["relax.expr.Constant"][44], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv98: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][45], R.shape([1, 96, 1, 1]))
lv99: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv97, lv98)
lv100 = R.call_tir(cls.maximum1, (lv99, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv101 = R.call_tir(cls.minimum1, (lv100, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv102: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv101, metadata["relax.expr.Constant"][46], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv103: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][47], R.shape([1, 96, 1, 1]))
lv104: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv102, lv103)
lv105: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv104, metadata["relax.expr.Constant"][48], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv106: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][49], R.shape([1, 96, 1, 1]))
lv107: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv105, lv106)
lv108 = R.call_tir(cls.maximum1, (lv107, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv109 = R.call_tir(cls.minimum1, (lv108, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv110: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv109, metadata["relax.expr.Constant"][50], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv111: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][51], R.shape([1, 96, 1, 1]))
lv112: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv110, lv111)
lv113: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv112, metadata["relax.expr.Constant"][52], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv114: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][53], R.shape([1, 96, 1, 1]))
lv115: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv113, lv114)
lv116 = R.call_tir(cls.maximum1, (lv115, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv117 = R.call_tir(cls.minimum1, (lv116, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv118: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv117, metadata["relax.expr.Constant"][54], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv119: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][55], R.shape([1, 96, 1, 1]))
lv120: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv118, lv119)
lv121: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv120, metadata["relax.expr.Constant"][56], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv122: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][57], R.shape([1, 96, 1, 1]))
lv123: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv121, lv122)
lv124 = R.call_tir(cls.maximum1, (lv123, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv125 = R.call_tir(cls.minimum1, (lv124, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv126: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv125, metadata["relax.expr.Constant"][58], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv127: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][59], R.shape([1, 96, 1, 1]))
lv128: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv126, lv127)
# Classification head: 1x1 conv down to 2 channels (+ bias in lv183).
lv129: R.Tensor((1, 2, 15, 15), dtype="float32") = R.nn.conv2d(lv128, metadata["relax.expr.Constant"][60], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv130: R.Tensor((1, 2, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][61], R.shape([1, 2, 1, 1]))
# lv131..lv179: the same conv tower pattern for the regression branch.
lv131: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv73, lv74)
lv132: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv131, metadata["relax.expr.Constant"][62], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv133: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][63], R.shape([1, 96, 1, 1]))
lv134: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv132, lv133)
lv135 = R.call_tir(cls.maximum1, (lv134, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv136 = R.call_tir(cls.minimum1, (lv135, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv137: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv136, metadata["relax.expr.Constant"][64], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv138: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][65], R.shape([1, 96, 1, 1]))
lv139: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv137, lv138)
lv140: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv139, metadata["relax.expr.Constant"][66], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv141: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][67], R.shape([1, 96, 1, 1]))
lv142: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv140, lv141)
lv143 = R.call_tir(cls.maximum1, (lv142, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv144 = R.call_tir(cls.minimum1, (lv143, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv145: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv144, metadata["relax.expr.Constant"][68], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv146: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][69], R.shape([1, 96, 1, 1]))
lv147: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv145, lv146)
lv148: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv147, metadata["relax.expr.Constant"][70], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv149: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][71], R.shape([1, 96, 1, 1]))
lv150: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv148, lv149)
lv151 = R.call_tir(cls.maximum1, (lv150, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv152 = R.call_tir(cls.minimum1, (lv151, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv153: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv152, metadata["relax.expr.Constant"][72], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv154: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][73], R.shape([1, 96, 1, 1]))
lv155: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv153, lv154)
lv156: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv155, metadata["relax.expr.Constant"][74], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv157: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][75], R.shape([1, 96, 1, 1]))
lv158: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv156, lv157)
lv159 = R.call_tir(cls.maximum1, (lv158, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv160 = R.call_tir(cls.minimum1, (lv159, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv161: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv160, metadata["relax.expr.Constant"][76], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv162: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][77], R.shape([1, 96, 1, 1]))
lv163: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv161, lv162)
lv164: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv163, metadata["relax.expr.Constant"][78], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv165: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][79], R.shape([1, 96, 1, 1]))
lv166: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv164, lv165)
lv167 = R.call_tir(cls.maximum1, (lv166, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv168 = R.call_tir(cls.minimum1, (lv167, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv169: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv168, metadata["relax.expr.Constant"][80], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv170: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][81], R.shape([1, 96, 1, 1]))
lv171: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv169, lv170)
lv172: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv171, metadata["relax.expr.Constant"][82], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv173: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][83], R.shape([1, 96, 1, 1]))
lv174: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv172, lv173)
lv175 = R.call_tir(cls.maximum1, (lv174, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv176 = R.call_tir(cls.minimum1, (lv175, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv177: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv176, metadata["relax.expr.Constant"][84], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv178: R.Tensor((1, 96, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][85], R.shape([1, 96, 1, 1]))
lv179: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv177, lv178)
# Regression head: 1x1 conv down to 4 channels, bias, then exp (lv184).
lv180: R.Tensor((1, 4, 15, 15), dtype="float32") = R.nn.conv2d(lv179, metadata["relax.expr.Constant"][86], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv181: R.Tensor((1, 4, 1, 1), dtype="float32") = R.reshape(metadata["relax.expr.Constant"][87], R.shape([1, 4, 1, 1]))
lv182: R.Tensor((1, 4, 15, 15), dtype="float32") = R.add(lv180, lv181)
lv183: R.Tensor((1, 2, 15, 15), dtype="float32") = R.add(lv129, lv130)
lv184: R.Tensor((1, 4, 15, 15), dtype="float32") = R.exp(lv182)
gv: R.Tuple(R.Tensor((1, 2, 15, 15), dtype="float32"), R.Tensor((1, 4, 15, 15), dtype="float32")) = lv183, lv184
R.output(gv)
return gv
# Metadata omitted. Use show_meta=True in script() method to show it.
# Folding / canonicalization passes applied to the freshly imported module.
_fold_passes = [
    relax.transform.FoldBatchnormToConv2D(),
    relax.transform.FoldConstant(),
    relax.transform.RemoveRedundantReshape(),
    # Decompose BatchNorm into a set of simpler operators so they can be fused.
    relax.transform.DecomposeOpsForInference(),
    # Canonicalize variable bindings.
    relax.transform.CanonicalizeBindings(),
]
fold_pipeline = tvm.transform.Sequential(_fold_passes)

# First optimization round over the model.
run_mod = fold_pipeline(mod)
run_mod.show()
# from tvm.script import ir as I
# from tvm.script import tir as T
# from tvm.script import relax as R
@I.ir_module
class Module:
@T.prim_func(private=True)
def maximum(A: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(64), T.int64(15), T.int64(15)):
with T.block("T_maximum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_maximum[v_ax0, v_ax1, v_ax2, v_ax3])
T_maximum[v_ax0, v_ax1, v_ax2, v_ax3] = T.max(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
@T.prim_func(private=True)
def maximum1(A: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(96), T.int64(15), T.int64(15)):
with T.block("T_maximum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_maximum[v_ax0, v_ax1, v_ax2, v_ax3])
T_maximum[v_ax0, v_ax1, v_ax2, v_ax3] = T.max(A[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
@T.prim_func(private=True)
def minimum(lv24: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(64), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(64), T.int64(15), T.int64(15)):
with T.block("T_minimum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(lv24[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_minimum[v_ax0, v_ax1, v_ax2, v_ax3])
T_minimum[v_ax0, v_ax1, v_ax2, v_ax3] = T.min(lv24[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
@T.prim_func(private=True)
def minimum1(lv84: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32"), B: T.Buffer((), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(96), T.int64(15), T.int64(15)), "float32")):
T.func_attr({"tir.noalias": T.bool(True)})
# with T.block("root"):
for ax0, ax1, ax2, ax3 in T.grid(T.int64(1), T.int64(96), T.int64(15), T.int64(15)):
with T.block("T_minimum"):
v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(lv84[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
T.writes(T_minimum[v_ax0, v_ax1, v_ax2, v_ax3])
T_minimum[v_ax0, v_ax1, v_ax2, v_ax3] = T.min(lv84[v_ax0, v_ax1, v_ax2, v_ax3], B[()])
# Relax graph for the ONNX "nanotrack_head" model (auto-generated by
# `from_onnx`; variable names `lvN` and `metadata[...]` constant indices are
# importer artifacts). Takes two NCHW float32 feature maps — input1
# (1, 96, 8, 8) and input2 (1, 96, 16, 16) — and returns a tuple of a
# (1, 2, 15, 15) map and a (1, 4, 15, 15) map. Given the model name these are
# presumably the tracker's classification and bounding-box regression heads
# over template/search features — TODO confirm against the original network.
# All weights/biases live in `metadata["relax.expr.Constant"]` (omitted here).
@R.function
def main(input1: R.Tensor((1, 96, 8, 8), dtype="float32"), input2: R.Tensor((1, 96, 16, 16), dtype="float32")) -> R.Tuple(R.Tensor((1, 2, 15, 15), dtype="float32"), R.Tensor((1, 4, 15, 15), dtype="float32")):
R.func_attr({"num_input": 2})
cls = Module
with R.dataflow():
# Crop input1 along axis 2 (rows 2:6) -> (1, 96, 4, 8); cropped again later.
lv: R.Tensor((1, 96, 4, 8), dtype="float32") = R.strided_slice(input1, (R.prim_value(2),), (R.prim_value(2),), (R.prim_value(6),), (R.prim_value(1),), assume_inbound=False)
# --- Branch 1: project both inputs with 1x1 convs (+ broadcast bias adds). ---
lv1: R.Tensor((1, 96, 8, 8), dtype="float32") = R.nn.conv2d(input1, metadata["relax.expr.Constant"][0], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv3: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][1], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv5: R.Tensor((1, 96, 8, 8), dtype="float32") = R.add(lv1, metadata["relax.expr.Constant"][2])
# Flatten + transpose the 8x8 branch into a (64, 96, 1, 1) kernel, so the
# conv2d below (lv10) computes a data-dependent cross-correlation of the
# two inputs rather than a convolution with a fixed weight.
lv6: R.Tensor((1, 96, 64), dtype="float32") = R.reshape(lv5, R.shape([1, 96, 64]))
lv7: R.Tensor((1, 64, 96), dtype="float32") = R.permute_dims(lv6, axes=[0, 2, 1])
lv8: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv3, metadata["relax.expr.Constant"][3])
lv9: R.Tensor((64, 96, 1, 1), dtype="float32") = R.reshape(lv7, R.shape([64, 96, 1, 1]))
lv10: R.Tensor((1, 64, 16, 16), dtype="float32") = R.nn.conv2d(lv8, lv9, strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
# Squeeze-and-excitation-style channel gate:
# global mean -> 1x1 conv -> ReLU -> 1x1 conv -> sigmoid -> scale lv10.
lv11: R.Tensor((1, 64, 1, 1), dtype="float32") = R.mean(lv10, axis=[2, 3], keepdims=True)
lv12: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv11, metadata["relax.expr.Constant"][4], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv14: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv12, metadata["relax.expr.Constant"][5])
lv15: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.relu(lv14)
lv16: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv15, metadata["relax.expr.Constant"][6], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv18: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv16, metadata["relax.expr.Constant"][7])
lv19: R.Tensor((1, 64, 1, 1), dtype="float32") = R.sigmoid(lv18)
lv20: R.Tensor((1, 64, 16, 16), dtype="float32") = R.multiply(lv10, lv19)
# Depthwise conv (groups=64, 16x16 -> 15x15), bias, then clip(x, 0, 6)
# (ReLU6) via the TIR maximum/minimum kernels.
lv21: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv20, metadata["relax.expr.Constant"][8], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=64, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv23: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv21, metadata["relax.expr.Constant"][9])
lv24 = R.call_tir(cls.maximum, (lv23, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv25 = R.call_tir(cls.minimum, (lv24, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv26: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv25, metadata["relax.expr.Constant"][10], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
# --- Branch 2: identical structure to branch 1 with its own weights
# (constants [11]..[21]); feeds the second output head. ---
lv28: R.Tensor((1, 96, 8, 8), dtype="float32") = R.nn.conv2d(input1, metadata["relax.expr.Constant"][11], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv30: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][12], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv32: R.Tensor((1, 96, 8, 8), dtype="float32") = R.add(lv28, metadata["relax.expr.Constant"][13])
lv33: R.Tensor((1, 96, 64), dtype="float32") = R.reshape(lv32, R.shape([1, 96, 64]))
lv34: R.Tensor((1, 64, 96), dtype="float32") = R.permute_dims(lv33, axes=[0, 2, 1])
lv35: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv30, metadata["relax.expr.Constant"][14])
lv36: R.Tensor((64, 96, 1, 1), dtype="float32") = R.reshape(lv34, R.shape([64, 96, 1, 1]))
lv37: R.Tensor((1, 64, 16, 16), dtype="float32") = R.nn.conv2d(lv35, lv36, strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv38: R.Tensor((1, 64, 1, 1), dtype="float32") = R.mean(lv37, axis=[2, 3], keepdims=True)
lv39: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv38, metadata["relax.expr.Constant"][15], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv41: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv39, metadata["relax.expr.Constant"][16])
lv42: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.relu(lv41)
lv43: R.Tensor((1, 64, 1, 1), dtype="float32") = R.nn.conv2d(lv42, metadata["relax.expr.Constant"][17], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv45: R.Tensor((1, 64, 1, 1), dtype="float32") = R.add(lv43, metadata["relax.expr.Constant"][18])
lv46: R.Tensor((1, 64, 1, 1), dtype="float32") = R.sigmoid(lv45)
lv47: R.Tensor((1, 64, 16, 16), dtype="float32") = R.multiply(lv37, lv46)
lv48: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv47, metadata["relax.expr.Constant"][19], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=64, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv50: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv48, metadata["relax.expr.Constant"][20])
lv51 = R.call_tir(cls.maximum, (lv50, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv52 = R.call_tir(cls.minimum, (lv51, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 64, 15, 15), dtype="float32"))
lv53: R.Tensor((1, 64, 15, 15), dtype="float32") = R.nn.conv2d(lv52, metadata["relax.expr.Constant"][21], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
# Crop `lv` along axis 3 (cols 2:6) -> (1, 96, 4, 4): a centered 4x4 patch
# of input1. Two projected copies of it are reshaped to (96, 1, 4, 4) and
# used as per-channel (groups=96) correlation kernels below (lv71, lv76).
lv55: R.Tensor((1, 96, 4, 4), dtype="float32") = R.strided_slice(lv, (R.prim_value(3),), (R.prim_value(2),), (R.prim_value(6),), (R.prim_value(1),), assume_inbound=False)
lv56: R.Tensor((1, 96, 4, 4), dtype="float32") = R.nn.conv2d(lv55, metadata["relax.expr.Constant"][22], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv58: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][23], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv60: R.Tensor((1, 96, 4, 4), dtype="float32") = R.add(lv56, metadata["relax.expr.Constant"][24])
lv61: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv58, metadata["relax.expr.Constant"][25])
lv62: R.Tensor((96, 1, 4, 4), dtype="float32") = R.reshape(lv60, R.shape([96, 1, 4, 4]))
lv63: R.Tensor((1, 96, 4, 4), dtype="float32") = R.nn.conv2d(lv55, metadata["relax.expr.Constant"][26], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv65: R.Tensor((1, 96, 16, 16), dtype="float32") = R.nn.conv2d(input2, metadata["relax.expr.Constant"][27], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv67: R.Tensor((1, 96, 4, 4), dtype="float32") = R.add(lv63, metadata["relax.expr.Constant"][28])
lv68: R.Tensor((1, 96, 16, 16), dtype="float32") = R.add(lv65, metadata["relax.expr.Constant"][29])
lv69: R.Tensor((96, 1, 4, 4), dtype="float32") = R.reshape(lv67, R.shape([96, 1, 4, 4]))
# Per-channel correlation (depthwise conv with data-dependent kernel),
# concat with the corresponding branch output, then 1x1 fuse conv.
lv70: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv26, metadata["relax.expr.Constant"][30])
lv71: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv61, lv62, strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv72: R.Tensor((1, 160, 15, 15), dtype="float32") = R.concat((lv70, lv71), axis=1)
lv73: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv72, metadata["relax.expr.Constant"][31], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv75: R.Tensor((1, 64, 15, 15), dtype="float32") = R.add(lv53, metadata["relax.expr.Constant"][32])
lv76: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv68, lv69, strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv77: R.Tensor((1, 160, 15, 15), dtype="float32") = R.concat((lv75, lv76), axis=1)
lv78: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv77, metadata["relax.expr.Constant"][33], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
# --- Head tower A (from lv78): six repeats of
# [bias add, 3x3 depthwise conv (groups=96, pad 1), bias add, ReLU6,
#  1x1 conv], then a final 1x1 conv to 2 channels (lv129). ---
lv80: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv78, metadata["relax.expr.Constant"][34])
lv81: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv80, metadata["relax.expr.Constant"][35], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv83: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv81, metadata["relax.expr.Constant"][36])
lv84 = R.call_tir(cls.maximum1, (lv83, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv85 = R.call_tir(cls.minimum1, (lv84, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv86: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv85, metadata["relax.expr.Constant"][37], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv88: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv86, metadata["relax.expr.Constant"][38])
lv89: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv88, metadata["relax.expr.Constant"][39], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv91: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv89, metadata["relax.expr.Constant"][40])
lv92 = R.call_tir(cls.maximum1, (lv91, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv93 = R.call_tir(cls.minimum1, (lv92, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv94: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv93, metadata["relax.expr.Constant"][41], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv96: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv94, metadata["relax.expr.Constant"][42])
lv97: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv96, metadata["relax.expr.Constant"][43], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv99: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv97, metadata["relax.expr.Constant"][44])
lv100 = R.call_tir(cls.maximum1, (lv99, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv101 = R.call_tir(cls.minimum1, (lv100, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv102: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv101, metadata["relax.expr.Constant"][45], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv104: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv102, metadata["relax.expr.Constant"][46])
lv105: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv104, metadata["relax.expr.Constant"][47], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv107: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv105, metadata["relax.expr.Constant"][48])
lv108 = R.call_tir(cls.maximum1, (lv107, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv109 = R.call_tir(cls.minimum1, (lv108, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv110: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv109, metadata["relax.expr.Constant"][49], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv112: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv110, metadata["relax.expr.Constant"][50])
lv113: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv112, metadata["relax.expr.Constant"][51], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv115: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv113, metadata["relax.expr.Constant"][52])
lv116 = R.call_tir(cls.maximum1, (lv115, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv117 = R.call_tir(cls.minimum1, (lv116, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv118: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv117, metadata["relax.expr.Constant"][53], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv120: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv118, metadata["relax.expr.Constant"][54])
lv121: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv120, metadata["relax.expr.Constant"][55], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv123: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv121, metadata["relax.expr.Constant"][56])
lv124 = R.call_tir(cls.maximum1, (lv123, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv125 = R.call_tir(cls.minimum1, (lv124, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv126: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv125, metadata["relax.expr.Constant"][57], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv128: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv126, metadata["relax.expr.Constant"][58])
# Tower A output projection: 96 -> 2 channels (first tuple element).
lv129: R.Tensor((1, 2, 15, 15), dtype="float32") = R.nn.conv2d(lv128, metadata["relax.expr.Constant"][59], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
# --- Head tower B (from lv73): same repeated structure as tower A with its
# own weights (constants [60]..[84]), ending in a 4-channel projection. ---
lv131: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv73, metadata["relax.expr.Constant"][60])
lv132: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv131, metadata["relax.expr.Constant"][61], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv134: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv132, metadata["relax.expr.Constant"][62])
lv135 = R.call_tir(cls.maximum1, (lv134, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv136 = R.call_tir(cls.minimum1, (lv135, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv137: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv136, metadata["relax.expr.Constant"][63], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv139: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv137, metadata["relax.expr.Constant"][64])
lv140: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv139, metadata["relax.expr.Constant"][65], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv142: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv140, metadata["relax.expr.Constant"][66])
lv143 = R.call_tir(cls.maximum1, (lv142, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv144 = R.call_tir(cls.minimum1, (lv143, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv145: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv144, metadata["relax.expr.Constant"][67], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv147: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv145, metadata["relax.expr.Constant"][68])
lv148: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv147, metadata["relax.expr.Constant"][69], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv150: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv148, metadata["relax.expr.Constant"][70])
lv151 = R.call_tir(cls.maximum1, (lv150, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv152 = R.call_tir(cls.minimum1, (lv151, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv153: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv152, metadata["relax.expr.Constant"][71], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv155: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv153, metadata["relax.expr.Constant"][72])
lv156: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv155, metadata["relax.expr.Constant"][73], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv158: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv156, metadata["relax.expr.Constant"][74])
lv159 = R.call_tir(cls.maximum1, (lv158, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv160 = R.call_tir(cls.minimum1, (lv159, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv161: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv160, metadata["relax.expr.Constant"][75], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv163: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv161, metadata["relax.expr.Constant"][76])
lv164: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv163, metadata["relax.expr.Constant"][77], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv166: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv164, metadata["relax.expr.Constant"][78])
lv167 = R.call_tir(cls.maximum1, (lv166, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv168 = R.call_tir(cls.minimum1, (lv167, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv169: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv168, metadata["relax.expr.Constant"][79], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv171: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv169, metadata["relax.expr.Constant"][80])
lv172: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv171, metadata["relax.expr.Constant"][81], strides=[1, 1], padding=[1, 1, 1, 1], dilation=[1, 1], groups=96, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv174: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv172, metadata["relax.expr.Constant"][82])
lv175 = R.call_tir(cls.maximum1, (lv174, R.const(0.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv176 = R.call_tir(cls.minimum1, (lv175, R.const(6.0, "float32")), out_sinfo=R.Tensor((1, 96, 15, 15), dtype="float32"))
lv177: R.Tensor((1, 96, 15, 15), dtype="float32") = R.nn.conv2d(lv176, metadata["relax.expr.Constant"][83], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv179: R.Tensor((1, 96, 15, 15), dtype="float32") = R.add(lv177, metadata["relax.expr.Constant"][84])
# Tower B output projection: 96 -> 4 channels (second tuple element).
lv180: R.Tensor((1, 4, 15, 15), dtype="float32") = R.nn.conv2d(lv179, metadata["relax.expr.Constant"][85], strides=[1, 1], padding=[0, 0, 0, 0], dilation=[1, 1], groups=1, data_layout="NCHW", kernel_layout="OIHW", out_layout="NCHW", out_dtype="void")
lv182: R.Tensor((1, 4, 15, 15), dtype="float32") = R.add(lv180, metadata["relax.expr.Constant"][86])
lv183: R.Tensor((1, 2, 15, 15), dtype="float32") = R.add(lv129, metadata["relax.expr.Constant"][87])
# exp() applied only to the 4-channel map (keeps those outputs positive —
# consistent with a box-size regression head, TODO confirm).
lv184: R.Tensor((1, 4, 15, 15), dtype="float32") = R.exp(lv182)
gv: R.Tuple(R.Tensor((1, 2, 15, 15), dtype="float32"), R.Tensor((1, 4, 15, 15), dtype="float32")) = lv183, lv184
R.output(gv)
return gv
# Metadata omitted. Use show_meta=True in script() method to show it.