Forward fold ReLU failure
%cd ..
import set_env
/media/pc/data/lxw/ai/tvm-book/tests/book/doc/tests
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
# from tvm.relay.testing import create_workload
# from tvm.relay.build_module import bind_params_by_name
def initializer(_, param):
    # Helper kept from the original test utilities; not used in this section.
    param = np.zeros(param.shape)

def _get_positive_scale(size):
    # Strictly positive scales in [0.5, 1); negating them yields strictly negative scales.
    return np.random.uniform(0.5, 1, size=size).astype("float32")
def run_opt_pass(expr, opt_pass):
    # Wrap the expression in an IRModule, run the pass, and return the result:
    # the whole "main" function if `expr` was a Function, otherwise only its body.
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
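As a toy illustration (the expression and variable below are made up, not part of the original test), `run_opt_pass` returns the rewritten `main` function when given a `relay.Function`, and only the typed body when given a bare expression:

a = relay.var("a", shape=(2,), dtype="float32")
typed_body = run_opt_pass(relay.add(a, a), transform.InferType())
print(typed_body.checked_type)  # Tensor[(2), float32]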
A test case that cannot be folded, because the scale cannot pass through ReLU (a short numeric illustration of this follows the `before` definition below):
def before(x, conv_weight, in_bias, in_scale, channels, blocking):
    # Scale the input; in the cases below the scale is either non-constant or
    # negative, so it cannot be pushed through the ReLU that follows.
    x = relay.multiply(x, in_scale)
    xx = relay.nn.relu(x)
    y1 = relay.nn.conv2d(
        xx,
        conv_weight,
        channels=channels,
        kernel_size=(3, 3),
        data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
        kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
        padding=(1, 1),
    )
    # The scaled input is also consumed directly by the add.
    z = relay.add(y1, x)
    return relay.Function(relay.analysis.free_vars(z), z)
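Why the ReLU blocks the fold: relu(s * x) == s * relu(x) only holds for s >= 0, and ForwardFoldScaleAxis only pushes a scale through `nn.relu` when the scale is a constant known to be positive; here the scale is either a free variable (sign unknown at compile time) or a negative constant. A quick NumPy check, independent of Relay, shows the identity failing for a negative scale:

x_np = np.array([-1.0, 2.0], dtype="float32")
s = -0.5
lhs = np.maximum(s * x_np, 0)    # relu(s * x) -> [0.5, 0.]
rhs = s * np.maximum(x_np, 0)    # s * relu(x) -> [-0., -1.]
print(lhs, rhs)                  # the two differ, so the rewrite would be wrong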
def check(shape, channels, blocking, in_scale):
    x = relay.var("x", shape=shape)
    weight = relay.var("weight")
    if blocking:
        in_channels = shape[3] * shape[4]
        in_bias = relay.var("in_bias", shape=(1, in_channels // blocking[0], 1, 1, blocking[0]))
    else:
        in_channels = shape[-1]
        in_bias = relay.var("in_bias", shape=(in_channels,))
    assert in_channels == channels
    y1 = before(x, weight, in_bias, in_scale, channels, blocking)
    tvm.IRModule.from_expr(y1).show()  # print the IR before the pass
    y1 = run_opt_pass(y1, transform.InferType())
    y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
    # The pass must leave the graph unchanged: folding is not legal here.
    tvm.ir.assert_structural_equal(y1, y1_folded)
in_scale = relay.var("in_scale", shape=(4,))
check((2, 11, 10, 4), 4, None, in_scale)
in_scale = relay.const(-_get_positive_scale((4,)))
check((2, 11, 10, 4), 4, None, in_scale)
in_scale = relay.var("in_scale", shape=(1, 1, 1, 2, 2))
check((2, 11, 10, 2, 2), 4, (2, 2), in_scale)
in_scale = relay.const(-_get_positive_scale((1, 1, 1, 2, 2)))
check((2, 11, 10, 2, 2), 4, (2, 2), in_scale)
def @main(%x: Tensor[(2, 11, 10, 4), float32], %in_scale: Tensor[(4), float32], %weight) {
%0 = multiply(%x, %in_scale);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, %weight, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO");
add(%2, %0)
}
def @main(%x: Tensor[(2, 11, 10, 4), float32], %weight) {
%0 = multiply(%x, meta[relay.Constant][0]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, %weight, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO");
add(%2, %0)
}
def @main(%x: Tensor[(2, 11, 10, 2, 2), float32], %in_scale: Tensor[(1, 1, 1, 2, 2), float32], %weight) {
%0 = multiply(%x, %in_scale);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, %weight, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NHWC2c", kernel_layout="HWIO1i2o");
add(%2, %0)
}
def @main(%x: Tensor[(2, 11, 10, 2, 2), float32], %weight) {
%0 = multiply(%x, meta[relay.Constant][0]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, %weight, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NHWC2c", kernel_layout="HWIO1i2o");
add(%2, %0)
}
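For contrast, here is a minimal sketch (reusing the helpers above; the graph itself is mine, not part of the original test) of a case where the forward fold does succeed: the scale is a positive constant and the scaled input is only consumed through the ReLU → conv2d chain, so the multiply can be absorbed into the convolution weight.

def fold_ok(x, conv_weight, in_scale, channels):
    x = relay.multiply(x, in_scale)  # positive constant scale
    x = relay.nn.relu(x)             # positive scales may pass through ReLU
    y = relay.nn.conv2d(
        x,
        conv_weight,
        channels=channels,
        kernel_size=(3, 3),
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=(1, 1),
    )
    return relay.Function(relay.analysis.free_vars(y), y)

x = relay.var("x", shape=(2, 11, 10, 4))
weight = relay.var("weight")
in_scale = relay.const(_get_positive_scale((4,)))
y = run_opt_pass(fold_ok(x, weight, in_scale, 4), transform.InferType())
y_folded = run_opt_pass(y, transform.ForwardFoldScaleAxis())
print(tvm.ir.structural_equal(y, y_folded))  # False: the graph was rewritten

Showing `y_folded` should reveal the multiply moved off the data path and onto the weight, which is exactly what the failing cases above cannot do.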