# from tvm.script import ir as I
# from tvm.script import tir as T
# from tvm.script import relax as R
@I . ir_module
class Module :
@T.prim_func(private=True)
def maximum(A: T.Buffer((T.int64(1), T.int64(16), T.int64(128), T.int64(96)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(16), T.int64(128), T.int64(96)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(16), T.int64(128), T.int64(96)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum1(A: T.Buffer((T.int64(1), T.int64(240), T.int64(32), T.int64(24)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(240), T.int64(32), T.int64(24)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(240), T.int64(32), T.int64(24)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum2(A: T.Buffer((T.int64(1), T.int64(240), T.int64(16), T.int64(12)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(240), T.int64(16), T.int64(12)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(240), T.int64(16), T.int64(12)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum3(A: T.Buffer((T.int64(1), T.int64(200), T.int64(16), T.int64(12)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(200), T.int64(16), T.int64(12)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(200), T.int64(16), T.int64(12)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum4(A: T.Buffer((T.int64(1), T.int64(184), T.int64(16), T.int64(12)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(184), T.int64(16), T.int64(12)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(184), T.int64(16), T.int64(12)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum5(A: T.Buffer((T.int64(1), T.int64(480), T.int64(16), T.int64(12)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(480), T.int64(16), T.int64(12)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(480), T.int64(16), T.int64(12)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum6(A: T.Buffer((T.int64(1), T.int64(672), T.int64(16), T.int64(12)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(672), T.int64(16), T.int64(12)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(672), T.int64(16), T.int64(12)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum7(A: T.Buffer((T.int64(1), T.int64(672), T.int64(8), T.int64(6)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(672), T.int64(8), T.int64(6)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(672), T.int64(8), T.int64(6)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum8(A: T.Buffer((T.int64(1), T.int64(960), T.int64(8), T.int64(6)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(960), T.int64(8), T.int64(6)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(960), T.int64(8), T.int64(6)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def maximum9(A: T.Buffer((T.int64(1), T.int64(1280), T.int64(8), T.int64(6)), "float32"), T_maximum: T.Buffer((T.int64(1), T.int64(1280), T.int64(8), T.int64(6)), "float32")):
    # Elementwise lower clamp: T_maximum[i] = max(A[i], -inf).
    # With a -inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(1280), T.int64(8), T.int64(6)):
        with T.block("T_maximum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v0, v1, v2, v3])
            T.writes(T_maximum[v0, v1, v2, v3])
            T_maximum[v0, v1, v2, v3] = T.max(A[v0, v1, v2, v3], T.float32("-inf"))
@T.prim_func(private=True)
def minimum(lv4: T.Buffer((T.int64(1), T.int64(16), T.int64(128), T.int64(96)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(16), T.int64(128), T.int64(96)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv4[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(16), T.int64(128), T.int64(96)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv4[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv4[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum1(lv114: T.Buffer((T.int64(1), T.int64(240), T.int64(32), T.int64(24)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(240), T.int64(32), T.int64(24)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv114[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(240), T.int64(32), T.int64(24)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv114[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv114[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum2(lv122: T.Buffer((T.int64(1), T.int64(240), T.int64(16), T.int64(12)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(240), T.int64(16), T.int64(12)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv122[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(240), T.int64(16), T.int64(12)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv122[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv122[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum3(lv133: T.Buffer((T.int64(1), T.int64(200), T.int64(16), T.int64(12)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(200), T.int64(16), T.int64(12)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv133[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(200), T.int64(16), T.int64(12)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv133[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv133[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum4(lv153: T.Buffer((T.int64(1), T.int64(184), T.int64(16), T.int64(12)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(184), T.int64(16), T.int64(12)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv153[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(184), T.int64(16), T.int64(12)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv153[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv153[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum5(lv193: T.Buffer((T.int64(1), T.int64(480), T.int64(16), T.int64(12)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(480), T.int64(16), T.int64(12)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv193[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(480), T.int64(16), T.int64(12)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv193[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv193[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum6(lv224: T.Buffer((T.int64(1), T.int64(672), T.int64(16), T.int64(12)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(672), T.int64(16), T.int64(12)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv224[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(672), T.int64(16), T.int64(12)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv224[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv224[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum7(lv264: T.Buffer((T.int64(1), T.int64(672), T.int64(8), T.int64(6)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(672), T.int64(8), T.int64(6)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv264[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(672), T.int64(8), T.int64(6)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv264[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv264[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum8(lv287: T.Buffer((T.int64(1), T.int64(960), T.int64(8), T.int64(6)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(960), T.int64(8), T.int64(6)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv287[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(960), T.int64(8), T.int64(6)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv287[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv287[v0, v1, v2, v3], T.float32("inf"))
@T.prim_func(private=True)
def minimum9(lv358: T.Buffer((T.int64(1), T.int64(1280), T.int64(8), T.int64(6)), "float32"), T_minimum: T.Buffer((T.int64(1), T.int64(1280), T.int64(8), T.int64(6)), "float32")):
    # Elementwise upper clamp: T_minimum[i] = min(lv358[i], +inf).
    # With a +inf bound this copies ordinary float values through unchanged.
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(1280), T.int64(8), T.int64(6)):
        with T.block("T_minimum"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(lv358[v0, v1, v2, v3])
            T.writes(T_minimum[v0, v1, v2, v3])
            T_minimum[v0, v1, v2, v3] = T.min(lv358[v0, v1, v2, v3], T.float32("inf"))
@R . function
def main (images: R. Tensor((1 , 3 , 256 , 192 ), dtype= "float32" )) -> R. Tensor((1 , 9 ), dtype= "float32" ):
R. func_attr({"num_input" : 1 })
cls = Module
with R. dataflow():
lv: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. nn. conv2d(images, metadata["relax.expr.Constant" ][0 ], strides= [2 , 2 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv1: R. Tensor((1 , 16 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][1 ], R. shape([1 , 16 , 1 , 1 ]))
lv2: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. add(lv, lv1)
lv3: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. add(lv2, R. const(3.0 , "float32" ))
lv4 = R. call_tir(cls. maximum, (lv3,), out_sinfo= R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ))
lv5 = R. call_tir(cls. minimum, (lv4,), out_sinfo= R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ))
lv6: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. divide(lv5, R. const(6.0 , "float32" ))
lv7: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. multiply(lv2, lv6)
lv8: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. nn. conv2d(lv7, metadata["relax.expr.Constant" ][2 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 16 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv9: R. Tensor((1 , 16 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][3 ], R. shape([1 , 16 , 1 , 1 ]))
lv10: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. add(lv8, lv9)
lv11: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. nn. relu(lv10)
lv12: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. nn. conv2d(lv11, metadata["relax.expr.Constant" ][4 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv13: R. Tensor((1 , 16 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][5 ], R. shape([1 , 16 , 1 , 1 ]))
lv14: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. add(lv12, lv13)
lv15: R. Tensor((1 , 16 , 128 , 96 ), dtype= "float32" ) = R. add(lv14, lv7)
lv16: R. Tensor((1 , 64 , 128 , 96 ), dtype= "float32" ) = R. nn. conv2d(lv15, metadata["relax.expr.Constant" ][6 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv17: R. Tensor((1 , 64 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][7 ], R. shape([1 , 64 , 1 , 1 ]))
lv18: R. Tensor((1 , 64 , 128 , 96 ), dtype= "float32" ) = R. add(lv16, lv17)
lv19: R. Tensor((1 , 64 , 128 , 96 ), dtype= "float32" ) = R. nn. relu(lv18)
lv20: R. Tensor((1 , 64 , 64 , 48 ), dtype= "float32" ) = R. nn. conv2d(lv19, metadata["relax.expr.Constant" ][8 ], strides= [2 , 2 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 64 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv21: R. Tensor((1 , 64 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][9 ], R. shape([1 , 64 , 1 , 1 ]))
lv22: R. Tensor((1 , 64 , 64 , 48 ), dtype= "float32" ) = R. add(lv20, lv21)
lv23: R. Tensor((1 , 64 , 64 , 48 ), dtype= "float32" ) = R. nn. relu(lv22)
lv24: R. Tensor((1 , 24 , 64 , 48 ), dtype= "float32" ) = R. nn. conv2d(lv23, metadata["relax.expr.Constant" ][10 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv25: R. Tensor((1 , 24 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][11 ], R. shape([1 , 24 , 1 , 1 ]))
lv26: R. Tensor((1 , 24 , 64 , 48 ), dtype= "float32" ) = R. add(lv24, lv25)
lv27: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. nn. conv2d(lv26, metadata["relax.expr.Constant" ][12 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv28: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][13 ], R. shape([1 , 72 , 1 , 1 ]))
lv29: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. add(lv27, lv28)
lv30: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. nn. relu(lv29)
lv31: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. nn. conv2d(lv30, metadata["relax.expr.Constant" ][14 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 72 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv32: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][15 ], R. shape([1 , 72 , 1 , 1 ]))
lv33: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. add(lv31, lv32)
lv34: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. nn. relu(lv33)
lv35: R. Tensor((1 , 24 , 64 , 48 ), dtype= "float32" ) = R. nn. conv2d(lv34, metadata["relax.expr.Constant" ][16 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv36: R. Tensor((1 , 24 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][17 ], R. shape([1 , 24 , 1 , 1 ]))
lv37: R. Tensor((1 , 24 , 64 , 48 ), dtype= "float32" ) = R. add(lv35, lv36)
lv38: R. Tensor((1 , 24 , 64 , 48 ), dtype= "float32" ) = R. add(lv37, lv26)
lv39: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. nn. conv2d(lv38, metadata["relax.expr.Constant" ][18 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv40: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][19 ], R. shape([1 , 72 , 1 , 1 ]))
lv41: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. add(lv39, lv40)
lv42: R. Tensor((1 , 72 , 64 , 48 ), dtype= "float32" ) = R. nn. relu(lv41)
lv43: R. Tensor((1 , 72 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv42, metadata["relax.expr.Constant" ][20 ], strides= [2 , 2 ], padding= [2 , 2 , 2 , 2 ], dilation= [1 , 1 ], groups= 72 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv44: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][21 ], R. shape([1 , 72 , 1 , 1 ]))
lv45: R. Tensor((1 , 72 , 32 , 24 ), dtype= "float32" ) = R. add(lv43, lv44)
lv46: R. Tensor((1 , 72 , 32 , 24 ), dtype= "float32" ) = R. nn. relu(lv45)
lv47: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. mean(lv46, axis= [2 , 3 ], keepdims= True )
lv48: R. Tensor((1 , 24 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv47, metadata["relax.expr.Constant" ][22 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv49: R. Tensor((1 , 24 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][23 ], R. shape([1 , 24 , 1 , 1 ]))
lv50: R. Tensor((1 , 24 , 1 , 1 ), dtype= "float32" ) = R. add(lv48, lv49)
lv51: R. Tensor((1 , 24 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv50)
lv52: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv51, metadata["relax.expr.Constant" ][24 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv53: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][25 ], R. shape([1 , 72 , 1 , 1 ]))
lv54: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. add(lv52, lv53)
lv55: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv54)
lv56: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. add(lv55, R. const(0.5 , "float32" ))
lv57: R. Tensor((1 , 72 , 1 , 1 ), dtype= "float32" ) = R. clip(lv56, R. prim_value(0 ), R. prim_value(1 ))
lv58: R. Tensor((1 , 72 , 32 , 24 ), dtype= "float32" ) = R. multiply(lv57, lv46)
lv59: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv58, metadata["relax.expr.Constant" ][26 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv60: R. Tensor((1 , 40 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][27 ], R. shape([1 , 40 , 1 , 1 ]))
lv61: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. add(lv59, lv60)
lv62: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv61, metadata["relax.expr.Constant" ][28 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv63: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][29 ], R. shape([1 , 120 , 1 , 1 ]))
lv64: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. add(lv62, lv63)
lv65: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. relu(lv64)
lv66: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv65, metadata["relax.expr.Constant" ][30 ], strides= [1 , 1 ], padding= [2 , 2 , 2 , 2 ], dilation= [1 , 1 ], groups= 120 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv67: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][31 ], R. shape([1 , 120 , 1 , 1 ]))
lv68: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. add(lv66, lv67)
lv69: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. relu(lv68)
lv70: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. mean(lv69, axis= [2 , 3 ], keepdims= True )
lv71: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv70, metadata["relax.expr.Constant" ][32 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv72: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][33 ], R. shape([1 , 32 , 1 , 1 ]))
lv73: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. add(lv71, lv72)
lv74: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv73)
lv75: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv74, metadata["relax.expr.Constant" ][34 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv76: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][35 ], R. shape([1 , 120 , 1 , 1 ]))
lv77: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. add(lv75, lv76)
lv78: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv77)
lv79: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. add(lv78, R. const(0.5 , "float32" ))
lv80: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. clip(lv79, R. prim_value(0 ), R. prim_value(1 ))
lv81: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. multiply(lv80, lv69)
lv82: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv81, metadata["relax.expr.Constant" ][36 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv83: R. Tensor((1 , 40 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][37 ], R. shape([1 , 40 , 1 , 1 ]))
lv84: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. add(lv82, lv83)
lv85: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. add(lv84, lv61)
lv86: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv85, metadata["relax.expr.Constant" ][38 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv87: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][39 ], R. shape([1 , 120 , 1 , 1 ]))
lv88: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. add(lv86, lv87)
lv89: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. relu(lv88)
lv90: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv89, metadata["relax.expr.Constant" ][40 ], strides= [1 , 1 ], padding= [2 , 2 , 2 , 2 ], dilation= [1 , 1 ], groups= 120 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv91: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][41 ], R. shape([1 , 120 , 1 , 1 ]))
lv92: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. add(lv90, lv91)
lv93: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. nn. relu(lv92)
lv94: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. mean(lv93, axis= [2 , 3 ], keepdims= True )
lv95: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv94, metadata["relax.expr.Constant" ][42 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv96: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][43 ], R. shape([1 , 32 , 1 , 1 ]))
lv97: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. add(lv95, lv96)
lv98: R. Tensor((1 , 32 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv97)
lv99: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv98, metadata["relax.expr.Constant" ][44 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv100: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][45 ], R. shape([1 , 120 , 1 , 1 ]))
lv101: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. add(lv99, lv100)
lv102: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv101)
lv103: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. add(lv102, R. const(0.5 , "float32" ))
lv104: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. clip(lv103, R. prim_value(0 ), R. prim_value(1 ))
lv105: R. Tensor((1 , 120 , 32 , 24 ), dtype= "float32" ) = R. multiply(lv104, lv93)
lv106: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv105, metadata["relax.expr.Constant" ][46 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv107: R. Tensor((1 , 40 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][47 ], R. shape([1 , 40 , 1 , 1 ]))
lv108: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. add(lv106, lv107)
lv109: R. Tensor((1 , 40 , 32 , 24 ), dtype= "float32" ) = R. add(lv108, lv85)
lv110: R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ) = R. nn. conv2d(lv109, metadata["relax.expr.Constant" ][48 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv111: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][49 ], R. shape([1 , 240 , 1 , 1 ]))
lv112: R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ) = R. add(lv110, lv111)
lv113: R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ) = R. add(lv112, R. const(3.0 , "float32" ))
lv114 = R. call_tir(cls. maximum1, (lv113,), out_sinfo= R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ))
lv115 = R. call_tir(cls. minimum1, (lv114,), out_sinfo= R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ))
lv116: R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ) = R. divide(lv115, R. const(6.0 , "float32" ))
lv117: R. Tensor((1 , 240 , 32 , 24 ), dtype= "float32" ) = R. multiply(lv112, lv116)
lv118: R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv117, metadata["relax.expr.Constant" ][50 ], strides= [2 , 2 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 240 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv119: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][51 ], R. shape([1 , 240 , 1 , 1 ]))
lv120: R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ) = R. add(lv118, lv119)
lv121: R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ) = R. add(lv120, R. const(3.0 , "float32" ))
lv122 = R. call_tir(cls. maximum2, (lv121,), out_sinfo= R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ))
lv123 = R. call_tir(cls. minimum2, (lv122,), out_sinfo= R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ))
lv124: R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ) = R. divide(lv123, R. const(6.0 , "float32" ))
lv125: R. Tensor((1 , 240 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv120, lv124)
lv126: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv125, metadata["relax.expr.Constant" ][52 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv127: R. Tensor((1 , 80 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][53 ], R. shape([1 , 80 , 1 , 1 ]))
lv128: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv126, lv127)
lv129: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv128, metadata["relax.expr.Constant" ][54 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv130: R. Tensor((1 , 200 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][55 ], R. shape([1 , 200 , 1 , 1 ]))
lv131: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. add(lv129, lv130)
lv132: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. add(lv131, R. const(3.0 , "float32" ))
lv133 = R. call_tir(cls. maximum3, (lv132,), out_sinfo= R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ))
lv134 = R. call_tir(cls. minimum3, (lv133,), out_sinfo= R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ))
lv135: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. divide(lv134, R. const(6.0 , "float32" ))
lv136: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv131, lv135)
lv137: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv136, metadata["relax.expr.Constant" ][56 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 200 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv138: R. Tensor((1 , 200 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][57 ], R. shape([1 , 200 , 1 , 1 ]))
lv139: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. add(lv137, lv138)
lv140: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. add(lv139, R. const(3.0 , "float32" ))
lv141 = R. call_tir(cls. maximum3, (lv140,), out_sinfo= R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ))
lv142 = R. call_tir(cls. minimum3, (lv141,), out_sinfo= R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ))
lv143: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. divide(lv142, R. const(6.0 , "float32" ))
lv144: R. Tensor((1 , 200 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv139, lv143)
lv145: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv144, metadata["relax.expr.Constant" ][58 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv146: R. Tensor((1 , 80 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][59 ], R. shape([1 , 80 , 1 , 1 ]))
lv147: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv145, lv146)
lv148: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv147, lv128)
lv149: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv148, metadata["relax.expr.Constant" ][60 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv150: R. Tensor((1 , 184 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][61 ], R. shape([1 , 184 , 1 , 1 ]))
lv151: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv149, lv150)
lv152: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv151, R. const(3.0 , "float32" ))
lv153 = R. call_tir(cls. maximum4, (lv152,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv154 = R. call_tir(cls. minimum4, (lv153,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv155: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. divide(lv154, R. const(6.0 , "float32" ))
lv156: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv151, lv155)
lv157: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv156, metadata["relax.expr.Constant" ][62 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 184 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv158: R. Tensor((1 , 184 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][63 ], R. shape([1 , 184 , 1 , 1 ]))
lv159: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv157, lv158)
lv160: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv159, R. const(3.0 , "float32" ))
lv161 = R. call_tir(cls. maximum4, (lv160,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv162 = R. call_tir(cls. minimum4, (lv161,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv163: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. divide(lv162, R. const(6.0 , "float32" ))
lv164: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv159, lv163)
lv165: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv164, metadata["relax.expr.Constant" ][64 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv166: R. Tensor((1 , 80 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][65 ], R. shape([1 , 80 , 1 , 1 ]))
lv167: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv165, lv166)
lv168: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv167, lv148)
lv169: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv168, metadata["relax.expr.Constant" ][66 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv170: R. Tensor((1 , 184 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][67 ], R. shape([1 , 184 , 1 , 1 ]))
lv171: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv169, lv170)
lv172: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv171, R. const(3.0 , "float32" ))
lv173 = R. call_tir(cls. maximum4, (lv172,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv174 = R. call_tir(cls. minimum4, (lv173,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv175: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. divide(lv174, R. const(6.0 , "float32" ))
lv176: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv171, lv175)
lv177: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv176, metadata["relax.expr.Constant" ][68 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 184 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv178: R. Tensor((1 , 184 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][69 ], R. shape([1 , 184 , 1 , 1 ]))
lv179: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv177, lv178)
lv180: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. add(lv179, R. const(3.0 , "float32" ))
lv181 = R. call_tir(cls. maximum4, (lv180,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv182 = R. call_tir(cls. minimum4, (lv181,), out_sinfo= R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ))
lv183: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. divide(lv182, R. const(6.0 , "float32" ))
lv184: R. Tensor((1 , 184 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv179, lv183)
lv185: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv184, metadata["relax.expr.Constant" ][70 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv186: R. Tensor((1 , 80 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][71 ], R. shape([1 , 80 , 1 , 1 ]))
lv187: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv185, lv186)
lv188: R. Tensor((1 , 80 , 16 , 12 ), dtype= "float32" ) = R. add(lv187, lv168)
lv189: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv188, metadata["relax.expr.Constant" ][72 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv190: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][73 ], R. shape([1 , 480 , 1 , 1 ]))
lv191: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. add(lv189, lv190)
lv192: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. add(lv191, R. const(3.0 , "float32" ))
lv193 = R. call_tir(cls. maximum5, (lv192,), out_sinfo= R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ))
lv194 = R. call_tir(cls. minimum5, (lv193,), out_sinfo= R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ))
lv195: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. divide(lv194, R. const(6.0 , "float32" ))
lv196: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv191, lv195)
lv197: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv196, metadata["relax.expr.Constant" ][74 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 480 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv198: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][75 ], R. shape([1 , 480 , 1 , 1 ]))
lv199: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. add(lv197, lv198)
lv200: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. add(lv199, R. const(3.0 , "float32" ))
lv201 = R. call_tir(cls. maximum5, (lv200,), out_sinfo= R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ))
lv202 = R. call_tir(cls. minimum5, (lv201,), out_sinfo= R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ))
lv203: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. divide(lv202, R. const(6.0 , "float32" ))
lv204: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv199, lv203)
lv205: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. mean(lv204, axis= [2 , 3 ], keepdims= True )
lv206: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv205, metadata["relax.expr.Constant" ][76 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv207: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][77 ], R. shape([1 , 120 , 1 , 1 ]))
lv208: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. add(lv206, lv207)
lv209: R. Tensor((1 , 120 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv208)
lv210: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv209, metadata["relax.expr.Constant" ][78 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv211: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][79 ], R. shape([1 , 480 , 1 , 1 ]))
lv212: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. add(lv210, lv211)
lv213: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv212)
lv214: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. add(lv213, R. const(0.5 , "float32" ))
lv215: R. Tensor((1 , 480 , 1 , 1 ), dtype= "float32" ) = R. clip(lv214, R. prim_value(0 ), R. prim_value(1 ))
lv216: R. Tensor((1 , 480 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv215, lv204)
lv217: R. Tensor((1 , 112 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv216, metadata["relax.expr.Constant" ][80 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv218: R. Tensor((1 , 112 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][81 ], R. shape([1 , 112 , 1 , 1 ]))
lv219: R. Tensor((1 , 112 , 16 , 12 ), dtype= "float32" ) = R. add(lv217, lv218)
lv220: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv219, metadata["relax.expr.Constant" ][82 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv221: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][83 ], R. shape([1 , 672 , 1 , 1 ]))
lv222: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. add(lv220, lv221)
lv223: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. add(lv222, R. const(3.0 , "float32" ))
lv224 = R. call_tir(cls. maximum6, (lv223,), out_sinfo= R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ))
lv225 = R. call_tir(cls. minimum6, (lv224,), out_sinfo= R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ))
lv226: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. divide(lv225, R. const(6.0 , "float32" ))
lv227: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv222, lv226)
lv228: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv227, metadata["relax.expr.Constant" ][84 ], strides= [1 , 1 ], padding= [1 , 1 , 1 , 1 ], dilation= [1 , 1 ], groups= 672 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv229: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][85 ], R. shape([1 , 672 , 1 , 1 ]))
lv230: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. add(lv228, lv229)
lv231: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. add(lv230, R. const(3.0 , "float32" ))
lv232 = R. call_tir(cls. maximum6, (lv231,), out_sinfo= R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ))
lv233 = R. call_tir(cls. minimum6, (lv232,), out_sinfo= R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ))
lv234: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. divide(lv233, R. const(6.0 , "float32" ))
lv235: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv230, lv234)
lv236: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. mean(lv235, axis= [2 , 3 ], keepdims= True )
lv237: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv236, metadata["relax.expr.Constant" ][86 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv238: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][87 ], R. shape([1 , 168 , 1 , 1 ]))
lv239: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. add(lv237, lv238)
lv240: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv239)
lv241: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv240, metadata["relax.expr.Constant" ][88 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv242: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][89 ], R. shape([1 , 672 , 1 , 1 ]))
lv243: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. add(lv241, lv242)
lv244: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv243)
lv245: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. add(lv244, R. const(0.5 , "float32" ))
lv246: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. clip(lv245, R. prim_value(0 ), R. prim_value(1 ))
lv247: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv246, lv235)
lv248: R. Tensor((1 , 112 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv247, metadata["relax.expr.Constant" ][90 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv249: R. Tensor((1 , 112 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][91 ], R. shape([1 , 112 , 1 , 1 ]))
lv250: R. Tensor((1 , 112 , 16 , 12 ), dtype= "float32" ) = R. add(lv248, lv249)
lv251: R. Tensor((1 , 112 , 16 , 12 ), dtype= "float32" ) = R. add(lv250, lv219)
lv252: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. nn. conv2d(lv251, metadata["relax.expr.Constant" ][92 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv253: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][93 ], R. shape([1 , 672 , 1 , 1 ]))
lv254: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. add(lv252, lv253)
lv255: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. add(lv254, R. const(3.0 , "float32" ))
lv256 = R. call_tir(cls. maximum6, (lv255,), out_sinfo= R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ))
lv257 = R. call_tir(cls. minimum6, (lv256,), out_sinfo= R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ))
lv258: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. divide(lv257, R. const(6.0 , "float32" ))
lv259: R. Tensor((1 , 672 , 16 , 12 ), dtype= "float32" ) = R. multiply(lv254, lv258)
lv260: R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv259, metadata["relax.expr.Constant" ][94 ], strides= [2 , 2 ], padding= [2 , 2 , 2 , 2 ], dilation= [1 , 1 ], groups= 672 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv261: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][95 ], R. shape([1 , 672 , 1 , 1 ]))
lv262: R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ) = R. add(lv260, lv261)
lv263: R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ) = R. add(lv262, R. const(3.0 , "float32" ))
lv264 = R. call_tir(cls. maximum7, (lv263,), out_sinfo= R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ))
lv265 = R. call_tir(cls. minimum7, (lv264,), out_sinfo= R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ))
lv266: R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ) = R. divide(lv265, R. const(6.0 , "float32" ))
lv267: R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv262, lv266)
lv268: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. mean(lv267, axis= [2 , 3 ], keepdims= True )
lv269: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv268, metadata["relax.expr.Constant" ][96 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv270: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][97 ], R. shape([1 , 168 , 1 , 1 ]))
lv271: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. add(lv269, lv270)
lv272: R. Tensor((1 , 168 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv271)
lv273: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv272, metadata["relax.expr.Constant" ][98 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv274: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][99 ], R. shape([1 , 672 , 1 , 1 ]))
lv275: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. add(lv273, lv274)
lv276: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv275)
lv277: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. add(lv276, R. const(0.5 , "float32" ))
lv278: R. Tensor((1 , 672 , 1 , 1 ), dtype= "float32" ) = R. clip(lv277, R. prim_value(0 ), R. prim_value(1 ))
lv279: R. Tensor((1 , 672 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv278, lv267)
lv280: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv279, metadata["relax.expr.Constant" ][100 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv281: R. Tensor((1 , 160 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][101 ], R. shape([1 , 160 , 1 , 1 ]))
lv282: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. add(lv280, lv281)
lv283: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv282, metadata["relax.expr.Constant" ][102 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv284: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][103 ], R. shape([1 , 960 , 1 , 1 ]))
lv285: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv283, lv284)
lv286: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv285, R. const(3.0 , "float32" ))
lv287 = R. call_tir(cls. maximum8, (lv286,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv288 = R. call_tir(cls. minimum8, (lv287,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv289: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. divide(lv288, R. const(6.0 , "float32" ))
lv290: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv285, lv289)
lv291: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv290, metadata["relax.expr.Constant" ][104 ], strides= [1 , 1 ], padding= [2 , 2 , 2 , 2 ], dilation= [1 , 1 ], groups= 960 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv292: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][105 ], R. shape([1 , 960 , 1 , 1 ]))
lv293: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv291, lv292)
lv294: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv293, R. const(3.0 , "float32" ))
lv295 = R. call_tir(cls. maximum8, (lv294,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv296 = R. call_tir(cls. minimum8, (lv295,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv297: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. divide(lv296, R. const(6.0 , "float32" ))
lv298: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv293, lv297)
lv299: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. mean(lv298, axis= [2 , 3 ], keepdims= True )
lv300: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv299, metadata["relax.expr.Constant" ][106 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv301: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][107 ], R. shape([1 , 240 , 1 , 1 ]))
lv302: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. add(lv300, lv301)
lv303: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv302)
lv304: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv303, metadata["relax.expr.Constant" ][108 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv305: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][109 ], R. shape([1 , 960 , 1 , 1 ]))
lv306: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. add(lv304, lv305)
lv307: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv306)
lv308: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. add(lv307, R. const(0.5 , "float32" ))
lv309: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. clip(lv308, R. prim_value(0 ), R. prim_value(1 ))
lv310: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv309, lv298)
lv311: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv310, metadata["relax.expr.Constant" ][110 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv312: R. Tensor((1 , 160 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][111 ], R. shape([1 , 160 , 1 , 1 ]))
lv313: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. add(lv311, lv312)
lv314: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. add(lv313, lv282)
lv315: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv314, metadata["relax.expr.Constant" ][112 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv316: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][113 ], R. shape([1 , 960 , 1 , 1 ]))
lv317: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv315, lv316)
lv318: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv317, R. const(3.0 , "float32" ))
lv319 = R. call_tir(cls. maximum8, (lv318,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv320 = R. call_tir(cls. minimum8, (lv319,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv321: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. divide(lv320, R. const(6.0 , "float32" ))
lv322: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv317, lv321)
lv323: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv322, metadata["relax.expr.Constant" ][114 ], strides= [1 , 1 ], padding= [2 , 2 , 2 , 2 ], dilation= [1 , 1 ], groups= 960 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv324: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][115 ], R. shape([1 , 960 , 1 , 1 ]))
lv325: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv323, lv324)
lv326: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv325, R. const(3.0 , "float32" ))
lv327 = R. call_tir(cls. maximum8, (lv326,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv328 = R. call_tir(cls. minimum8, (lv327,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv329: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. divide(lv328, R. const(6.0 , "float32" ))
lv330: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv325, lv329)
lv331: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. mean(lv330, axis= [2 , 3 ], keepdims= True )
lv332: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv331, metadata["relax.expr.Constant" ][116 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv333: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][117 ], R. shape([1 , 240 , 1 , 1 ]))
lv334: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. add(lv332, lv333)
lv335: R. Tensor((1 , 240 , 1 , 1 ), dtype= "float32" ) = R. nn. relu(lv334)
lv336: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. nn. conv2d(lv335, metadata["relax.expr.Constant" ][118 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv337: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][119 ], R. shape([1 , 960 , 1 , 1 ]))
lv338: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. add(lv336, lv337)
lv339: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. multiply(R. const(0.1666666716337204 , "float32" ), lv338)
lv340: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. add(lv339, R. const(0.5 , "float32" ))
lv341: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. clip(lv340, R. prim_value(0 ), R. prim_value(1 ))
lv342: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv341, lv330)
lv343: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv342, metadata["relax.expr.Constant" ][120 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv344: R. Tensor((1 , 160 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][121 ], R. shape([1 , 160 , 1 , 1 ]))
lv345: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. add(lv343, lv344)
lv346: R. Tensor((1 , 160 , 8 , 6 ), dtype= "float32" ) = R. add(lv345, lv314)
lv347: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv346, metadata["relax.expr.Constant" ][122 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv348: R. Tensor((1 , 960 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][123 ], R. shape([1 , 960 , 1 , 1 ]))
lv349: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv347, lv348)
lv350: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. add(lv349, R. const(3.0 , "float32" ))
lv351 = R. call_tir(cls. maximum8, (lv350,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv352 = R. call_tir(cls. minimum8, (lv351,), out_sinfo= R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ))
lv353: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. divide(lv352, R. const(6.0 , "float32" ))
lv354: R. Tensor((1 , 960 , 8 , 6 ), dtype= "float32" ) = R. multiply(lv349, lv353)
lv355: R. Tensor((1 , 1280 , 8 , 6 ), dtype= "float32" ) = R. nn. conv2d(lv354, metadata["relax.expr.Constant" ][124 ], strides= [1 , 1 ], padding= [0 , 0 , 0 , 0 ], dilation= [1 , 1 ], groups= 1 , data_layout= "NCHW" , kernel_layout= "OIHW" , out_layout= "NCHW" , out_dtype= "void" )
lv356: R. Tensor((1 , 1280 , 1 , 1 ), dtype= "float32" ) = R. reshape(metadata["relax.expr.Constant" ][125 ], R. shape([1 , 1280 , 1 , 1 ]))
lv357: R. Tensor((1 , 1280 , 8 , 6 ), dtype= "float32" ) = R. add(lv355, lv356)
lv358 = R. call_tir(cls. maximum9, (lv357,), out_sinfo= R. Tensor((1 , 1280 , 8 , 6 ), dtype= "float32" ))
lv359 = R. call_tir(cls. minimum9, (lv358,), out_sinfo= R. Tensor((1 , 1280 , 8 , 6 ), dtype= "float32" ))
lv360: R. Tensor((1 , 1280 , 48 ), dtype= "float32" ) = R. reshape(lv359, R. shape([1 , 1280 , 48 ]))
lv361: R. Tensor((1 , 1280 ), dtype= "float32" ) = R. mean(lv360, axis= [- 1 ], keepdims= False )
lv362: R. Tensor((1 , 1280 , 1 , 1 ), dtype= "float32" ) = R. reshape(lv361, R. shape([1 , 1280 , 1 , 1 ]))
lv363: R. Tuple(R. Tensor((1 , 1280 , 1 , 1 ), dtype= "float32" ), R. Tensor((1280 ,), dtype= "float32" ), R. Tensor((1280 ,), dtype= "float32" )) = R. nn. batch_norm(lv362, metadata["relax.expr.Constant" ][126 ], metadata["relax.expr.Constant" ][127 ], metadata["relax.expr.Constant" ][128 ], metadata["relax.expr.Constant" ][129 ], axis= 1 , epsilon= 9.9999997473787516e-06 , center= True , scale= True , momentum= 0.10000000000000001 )
lv364: R. Tensor((1 , 1280 , 1 , 1 ), dtype= "float32" ) = lv363[0 ]
lv365: R. Tensor((1280 ,), dtype= "float32" ) = lv363[1 ]
lv366: R. Tensor((1280 ,), dtype= "float32" ) = lv363[2 ]
lv367: R. Tensor((1 , 1280 ), dtype= "float32" ) = R. reshape(lv364, R. shape([1 , 1280 ]))
lv368: R. Tensor((1 , 9 ), dtype= "float32" ) = R. matmul(lv367, metadata["relax.expr.Constant" ][130 ], out_dtype= "void" )
lv369: R. Tuple(R. Tensor((1 , 9 ), dtype= "float32" ), R. Tensor((9 ,), dtype= "float32" ), R. Tensor((9 ,), dtype= "float32" )) = R. nn. batch_norm(lv368, metadata["relax.expr.Constant" ][131 ], metadata["relax.expr.Constant" ][132 ], metadata["relax.expr.Constant" ][133 ], metadata["relax.expr.Constant" ][134 ], axis= 1 , epsilon= 9.9999997473787516e-06 , center= True , scale= True , momentum= 0.10000000000000001 )
lv370: R. Tensor((1 , 9 ), dtype= "float32" ) = lv369[0 ]
lv371: R. Tensor((9 ,), dtype= "float32" ) = lv369[1 ]
lv372: R. Tensor((9 ,), dtype= "float32" ) = lv369[2 ]
gv: R. Tensor((1 , 9 ), dtype= "float32" ) = lv370
R. output(gv)
return gv
# Metadata omitted. Use show_meta=True in script() method to show it.
# (paste artifact: "复制到剪贴板" = "Copy to clipboard" web-UI button text; commented out to keep the file syntactically valid)