Test cases for forward folding of a negative scale
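`ForwardFoldScaleAxis` rewrites `conv2d(x * s, W)` into `conv2d(x, W')`, where `W'` absorbs the channel-wise scale `s` into the kernel's input-channel axis. Because convolution is linear in its input, the identity holds even when `s` is negative, which is exactly what the cases below exercise. As a sanity check of the underlying arithmetic, here is a minimal NumPy sketch; the naive `conv2d_nchw` helper is written here purely for illustration and is not part of TVM:

import numpy as np

def conv2d_nchw(x, w):
    # Naive direct convolution: NCHW data, OIHW kernel, stride 1, no padding.
    n, c, h, width = x.shape
    o, i, kh, kw = w.shape
    out = np.zeros((n, o, h - kh + 1, width - kw + 1), dtype=x.dtype)
    for oc in range(o):
        for r in range(out.shape[2]):
            for col in range(out.shape[3]):
                patch = x[:, :, r:r + kh, col:col + kw]
                out[:, oc, r, col] = np.sum(patch * w[oc], axis=(1, 2, 3))
    return out

x = np.random.randn(1, 4, 6, 6).astype("float32")
w = np.random.randn(2, 4, 3, 3).astype("float32")
s = -np.random.uniform(0.5, 1, size=(4, 1, 1)).astype("float32")  # negative channel-wise scale

# Scaling the input channels equals scaling the kernel's input-channel
# axis, regardless of the sign of the scale.
np.testing.assert_allclose(
    conv2d_nchw(x * s, w),
    conv2d_nchw(x, w * s.reshape(1, 4, 1, 1)),
    rtol=1e-4, atol=1e-5,
)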
%cd ..
import set_env
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
def initializer(_, param):
    # Zero-initialize the parameter in place; rebinding the local name,
    # as in `param = np.zeros(param.shape)`, would be a no-op for the caller.
    param[:] = np.zeros(param.shape)
def _get_positive_scale(size):
    # Strictly positive scales in [0.5, 1); the test negates them below.
    return np.random.uniform(0.5, 1, size=size).astype("float32")
def run_opt_pass(expr, opt_pass):
    # Wrap `expr` in an IRModule, run a single pass, and unwrap the result.
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
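`run_opt_pass` is the usual wrap/run/unwrap helper from the Relay test suite: it lifts a bare expression or function into an `IRModule`, applies a single pass, and returns the rewritten `main` (or its body, if a non-function expression was passed in). A small standalone usage sketch, assuming only the imports above:

x = relay.var("x", shape=(1, 4))
f = relay.Function([x], relay.add(x, relay.const(1.0)))
typed = run_opt_pass(f, transform.InferType())  # a Function, since we passed one in
print(typed.ret_type)  # Tensor[(1, 4), float32]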
def before(x, conv_weight, in_scale, channels, blocking):
    # Channel-wise scale on the input followed by conv2d; the forward fold
    # should move `in_scale` out of the multiply and into `conv_weight`.
    args = [x, conv_weight]
    x = relay.multiply(x, in_scale)
    y = relay.nn.conv2d(
        x,
        conv_weight,
        channels=channels,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
        kernel_layout="OIHW4i{}o".format(blocking[1]) if blocking else "OIHW",
    )
    return relay.Function(args, y)
def expected(x, conv_weight, in_scale, in_channels, channels, blocking):
    # Use a fixed order of args so the alpha-equality check can pass.
    args = [x, conv_weight]
    if blocking:
        # Blocked layout: align the flattened (C_outer, c_inner) scale with
        # the "4i" input-channel axis of the OIHW4i2o kernel.
        squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3])
        conv_weight = relay.multiply(
            conv_weight, relay.reshape(squeezed_scale, (1, in_channels // 4, 1, 1, 4, 1))
        )
    else:
        # Plain layout: broadcast the scale along the input-channel axis
        # (axis 1 of OIHW).
        squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
        conv_weight = relay.multiply(
            conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
        )
    y = relay.nn.conv2d(
        x,
        conv_weight,
        channels=channels,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
        kernel_layout="OIHW4i{}o".format(blocking[1]) if blocking else "OIHW",
    )
    return relay.Function(args, y)
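The blocked branch of `expected` is just shape bookkeeping: the scale arrives in `NCHW2c` layout as `(1, C_outer, 1, 1, c_inner)`, is squeezed to `(C_outer, c_inner)`, and is then reshaped so its flattened `C_outer * c_inner` channels broadcast along the `4i` axis of the `OIHW4i2o` kernel. A minimal NumPy sketch of the same bookkeeping, using the sizes from the blocked test case below:

import numpy as np

scale = np.arange(1, 5, dtype="float32").reshape(1, 2, 1, 1, 2)  # (1, C_outer, 1, 1, c_inner)
squeezed = scale.squeeze(axis=(0, 2, 3))                         # (C_outer, c_inner) = (2, 2)
in_channels = squeezed.size                                      # C_outer * c_inner = 4
folded = squeezed.reshape(1, in_channels // 4, 1, 1, 4, 1)       # (1, 1, 1, 1, 4, 1)
weight = np.ones((4, 1, 3, 3, 4, 2), dtype="float32")            # OIHW4i2o kernel
assert (weight * folded).shape == weight.shape                   # broadcasts along the 4i axis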
# (data shape, output channels, (data_block, kernel_block) or None)
test_cases = [
    ((2, 4, 10, 10), 4, None),       # plain NCHW / OIHW
    ((2, 2, 10, 10, 2), 8, (2, 2)),  # blocked NCHW2c / OIHW4i2o
]
for shape, channels, blocking in test_cases:
    x = relay.var("x", shape=shape)
    if blocking:
        in_channels = shape[1] * shape[4]  # C_outer * c_inner
        # Negate a positive scale so the folded scale is strictly negative.
        in_scale = relay.const(-_get_positive_scale((1, shape[1], 1, 1, shape[4])))
    else:
        in_channels = shape[1]
        in_scale = relay.const(-_get_positive_scale((in_channels, 1, 1)))
    weight = relay.var("weight")
    y1 = before(x, weight, in_scale, channels, blocking)
    y1 = run_opt_pass(y1, transform.InferType())
    print("Before FoldScaleAxis:")
    tvm.IRModule.from_expr(y1).show()
    # Rebuild `weight` with its inferred type so `expected` type-checks.
    type_dict = {x.name_hint: x.checked_type for x in y1.params}
    weight = relay.var("weight", type_dict["weight"])
    y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
    print("After FoldScaleAxis:")
    tvm.IRModule.from_expr(y1_folded).show()
    y1_expected = expected(x, weight, in_scale, in_channels, channels, blocking)
    y1_expected = run_opt_pass(y1_expected, transform.InferType())
    tvm.ir.assert_structural_equal(y1_folded, y1_expected)
Before FoldScaleAxis:
def @main(%x: Tensor[(2, 4, 10, 10), float32] /* ty=Tensor[(2, 4, 10, 10), float32] */, %weight: Tensor[(4, 4, 3, 3), float32] /* ty=Tensor[(4, 4, 3, 3), float32] */) -> Tensor[(2, 4, 10, 10), float32] {
  %0 = multiply(%x, meta[relay.Constant][0] /* ty=Tensor[(4, 1, 1), float32] */) /* ty=Tensor[(2, 4, 10, 10), float32] */;
  nn.conv2d(%0, %weight, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3]) /* ty=Tensor[(2, 4, 10, 10), float32] */
}
After FoldScaleAxis:
def @main(%x: Tensor[(2, 4, 10, 10), float32] /* ty=Tensor[(2, 4, 10, 10), float32] */, %weight: Tensor[(4, 4, 3, 3), float32] /* ty=Tensor[(4, 4, 3, 3), float32] */) -> Tensor[(2, 4, 10, 10), float32] {
  %0 = squeeze(meta[relay.Constant][0] /* ty=Tensor[(4, 1, 1), float32] */, axis=[1, 2]) /* ty=Tensor[(4), float32] */;
  %1 = expand_dims(%0, axis=1, num_newaxis=2) /* ty=Tensor[(4, 1, 1), float32] */;
  %2 = multiply(%weight, %1) /* ty=Tensor[(4, 4, 3, 3), float32] */;
  nn.conv2d(%x, %2, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3]) /* ty=Tensor[(2, 4, 10, 10), float32] */
}
Before FoldScaleAxis:
def @main(%x: Tensor[(2, 2, 10, 10, 2), float32] /* ty=Tensor[(2, 2, 10, 10, 2), float32] */, %weight: Tensor[(4, 1, 3, 3, 4, 2), float32] /* ty=Tensor[(4, 1, 3, 3, 4, 2), float32] */) -> Tensor[(2, 4, 10, 10, 2), float32] {
  %0 = multiply(%x, meta[relay.Constant][0] /* ty=Tensor[(1, 2, 1, 1, 2), float32] */) /* ty=Tensor[(2, 2, 10, 10, 2), float32] */;
  nn.conv2d(%0, %weight, padding=[1, 1, 1, 1], channels=8, kernel_size=[3, 3], data_layout="NCHW2c", kernel_layout="OIHW4i2o") /* ty=Tensor[(2, 4, 10, 10, 2), float32] */
}
After FoldScaleAxis:
def @main(%x: Tensor[(2, 2, 10, 10, 2), float32] /* ty=Tensor[(2, 2, 10, 10, 2), float32] */, %weight: Tensor[(4, 1, 3, 3, 4, 2), float32] /* ty=Tensor[(4, 1, 3, 3, 4, 2), float32] */) -> Tensor[(2, 4, 10, 10, 2), float32] {
  %0 = squeeze(meta[relay.Constant][0] /* ty=Tensor[(1, 2, 1, 1, 2), float32] */, axis=[0, 2, 3]) /* ty=Tensor[(2, 2), float32] */;
  %1 = reshape(%0, newshape=[1, 1, 1, 1, 4, 1]) /* ty=Tensor[(1, 1, 1, 1, 4, 1), float32] */;
  %2 = multiply(%weight, %1) /* ty=Tensor[(4, 1, 3, 3, 4, 2), float32] */;
  nn.conv2d(%x, %2, padding=[1, 1, 1, 1], channels=8, kernel_size=[3, 3], data_layout="NCHW2c", kernel_layout="OIHW4i2o") /* ty=Tensor[(2, 4, 10, 10, 2), float32] */
}