Forward Fold Fails with Let

%cd ..
import set_env
/media/pc/data/lxw/ai/tvm-book/tests/book/doc/tests
import numpy as np

import tvm
from tvm import relay
from tvm.relay import transform


def initializer(_, param):
    # Zero-fill the parameter in place; rebinding the local name `param`
    # to a fresh array would have no effect on the caller.
    param[:] = 0


def _get_positive_scale(size):
    # ForwardFoldScaleAxis can only push a scale through nn.relu when the
    # scale is positive, so sample from [0.5, 1).
    return np.random.uniform(0.5, 1, size=size).astype("float32")


def run_opt_pass(expr, opt_pass):
    """Wrap `expr` in an IRModule, run `opt_pass`, and unwrap the result."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body

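To see what a successful fold looks like first, here is a minimal sketch (my own example, not part of the original test; it only uses the APIs imported above) where the constant scale feeds straight into the convolution. ForwardFoldScaleAxis absorbs the scale into the kernel, so the folded function is no longer structurally equal to the original:

x = relay.var("x", shape=(2, 11, 10, 4))
weight = relay.var("weight", shape=(3, 3, 4, 4))
# Scale along the channel (last) axis of the NHWC data.
scale = relay.const(_get_positive_scale(size=(4,)))
y = relay.nn.conv2d(
    relay.multiply(x, scale),
    weight,
    channels=4,
    kernel_size=(3, 3),
    data_layout="NHWC",
    kernel_layout="HWIO",
    padding=(1, 1),
)
f = run_opt_pass(relay.Function([x, weight], y), transform.InferType())
f_folded = run_opt_pass(f, transform.ForwardFoldScaleAxis())
# The rewrite moved the scale onto the weights, so the graphs differ.
assert not tvm.ir.structural_equal(f, f_folded)
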
A test case that cannot be folded: the scaled value is Let-bound and consumed both by the conv2d and by the final add, so the scale cannot be folded forward into the convolution weights:

def before(x, conv_weight, in_bias, in_scale, channels):
    args = [x, conv_weight, in_bias]
    x = relay.multiply(x, in_scale)
    x = relay.nn.relu(x)
    x = relay.add(x, in_bias)
    x_var = relay.Var("x_var")
    y1 = relay.nn.conv2d(
        x_var,
        conv_weight,
        channels=channels,
        kernel_size=(3, 3),
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=(1, 1),
    )
    # The scaled value `x` is consumed twice: once through the Let-bound
    # `x_var` inside the conv2d and once directly in this add, which is
    # what blocks the forward fold.
    z = relay.add(y1, x)
    let = relay.Let(x_var, x, z)
    return relay.Function(args, let)

def check(shape, channels):
    x = relay.var("x", shape=shape)
    in_channels = shape[-1]
    in_bias = relay.var("in_bias", shape=(in_channels,))
    in_scale = relay.const(_get_positive_scale(size=(in_channels,)))
    # The conv2d preserves the channel count, so input and output
    # channels must match.
    assert in_channels == channels
    weight = relay.var("weight")
    y1 = before(x, weight, in_bias, in_scale, channels)
    y1 = run_opt_pass(y1, transform.InferType())
    tvm.IRModule.from_expr(y1).show()
    y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
    # The pass must leave the graph untouched, so the folded function is
    # structurally identical to the input.
    tvm.ir.assert_structural_equal(y1, y1_folded)

check((2, 11, 10, 4), 4)
def @main(%x: Tensor[(2, 11, 10, 4), float32] /* ty=Tensor[(2, 11, 10, 4), float32] */, %weight: Tensor[(3, 3, 4, 4), float32] /* ty=Tensor[(3, 3, 4, 4), float32] */, %in_bias: Tensor[(4), float32] /* ty=Tensor[(4), float32] */) -> Tensor[(2, 11, 10, 4), float32] {
  %0 = multiply(%x, meta[relay.Constant][0] /* ty=Tensor[(4), float32] */) /* ty=Tensor[(2, 11, 10, 4), float32] */;
  %1 = nn.relu(%0) /* ty=Tensor[(2, 11, 10, 4), float32] */;
  %2 = add(%1, %in_bias) /* ty=Tensor[(2, 11, 10, 4), float32] */;
  let %x_var: Tensor[(2, 11, 10, 4), float32] /* ty=Tensor[(2, 11, 10, 4), float32] */ = %2;
  %3 = nn.conv2d(%x_var, %weight, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(2, 11, 10, 4), float32] */;
  add(%3, %2) /* ty=Tensor[(2, 11, 10, 4), float32] */
}
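
The assertion in `check` passes: the printed IR is returned unchanged because the Let-bound %2 feeds both the nn.conv2d and the final add, and absorbing the scale into the convolution weights would change the value reaching that add. For contrast, a hypothetical variant (my own sketch, not part of the original test) that drops the Let binding and the second consumer does fold:

def before_foldable(x, conv_weight, in_bias, in_scale, channels):
    # Same multiply/relu/bias chain as `before`, but the conv2d consumes
    # the scaled value directly and nothing else uses it afterwards.
    args = [x, conv_weight, in_bias]
    x = relay.multiply(x, in_scale)
    x = relay.nn.relu(x)
    x = relay.add(x, in_bias)
    y1 = relay.nn.conv2d(
        x,
        conv_weight,
        channels=channels,
        kernel_size=(3, 3),
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=(1, 1),
    )
    return relay.Function(args, y1)

x = relay.var("x", shape=(2, 11, 10, 4))
in_bias = relay.var("in_bias", shape=(4,))
in_scale = relay.const(_get_positive_scale(size=(4,)))
weight = relay.var("weight")
y2 = run_opt_pass(before_foldable(x, weight, in_bias, in_scale, 4), transform.InferType())
y2_folded = run_opt_pass(y2, transform.ForwardFoldScaleAxis())
# This time the pass rewrites the graph, so the two functions differ.
assert not tvm.ir.structural_equal(y2, y2_folded)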