Custom TVM Automatic Quantization#
This tutorial uses a pattern-matching strategy to partition the graph for quantization.
Benefits of explicitly defining the fusion rules:
Finer-grained control over which operators get fused.
Better adaptation to backends such as VTA.
%cd ..
import set_env
/media/pc/data/lxw/ai/tvm-book/doc/tutorials
import numpy as np
from tvm import relay
import tvm
def load_model(input_shape=[1, 3, 224, 224]):
    """Load the frontend (PyTorch) model."""
    import torch
    from torchvision.models import resnet18
    from torchvision.models.resnet import ResNet18_Weights
    model = resnet18(weights=ResNet18_Weights.DEFAULT)
    data = torch.randn(*input_shape)
    return torch.jit.trace(model.eval(), data)
H, W = 224, 224
input_shape = (1, 3, H, W)
input_name = "data"
traced_model = load_model(input_shape).eval()
# Translate the frontend model into a Relay module
origin_mod, origin_params = relay.frontend.from_pytorch(traced_model, [(input_name, input_shape)])
Let's first take the subgraph mod as an example to study how the quantization process is defined:
mod = relay.analysis.extract_intermdeiate_expr(origin_mod, 12)
At this point mod still contains nn.batch_norm operators, and its weights have not yet been bound as constant expressions:
mod.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* span=aten::_convolution_0.data:0:0 */, %aten::_convolution_0.weight: Tensor[(64, 3, 7, 7), float32] /* span=aten::_convolution_0.weight:0:0 */, %aten::batch_norm_0.weight: Tensor[(64), float32] /* span=aten::batch_norm_0.weight:0:0 */, %aten::batch_norm_0.bias: Tensor[(64), float32] /* span=aten::batch_norm_0.bias:0:0 */, %aten::batch_norm_0.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_0.running_mean:0:0 */, %aten::batch_norm_0.running_var: Tensor[(64), float32] /* span=aten::batch_norm_0.running_var:0:0 */, %aten::_convolution_1.weight: Tensor[(64, 64, 3, 3), float32] /* span=aten::_convolution_1.weight:0:0 */, %aten::batch_norm_1.weight: Tensor[(64), float32] /* span=aten::batch_norm_1.weight:0:0 */, %aten::batch_norm_1.bias: Tensor[(64), float32] /* span=aten::batch_norm_1.bias:0:0 */, %aten::batch_norm_1.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_1.running_mean:0:0 */, %aten::batch_norm_1.running_var: Tensor[(64), float32] /* span=aten::batch_norm_1.running_var:0:0 */, %aten::_convolution_2.weight: Tensor[(64, 64, 3, 3), float32] /* span=aten::_convolution_2.weight:0:0 */, %aten::batch_norm_2.weight: Tensor[(64), float32] /* span=aten::batch_norm_2.weight:0:0 */, %aten::batch_norm_2.bias: Tensor[(64), float32] /* span=aten::batch_norm_2.bias:0:0 */, %aten::batch_norm_2.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_2.running_mean:0:0 */, %aten::batch_norm_2.running_var: Tensor[(64), float32] /* span=aten::batch_norm_2.running_var:0:0 */) {
%0 = nn.conv2d(%data, %aten::_convolution_0.weight, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* span=aten::_convolution_0:0:0 */;
%1 = nn.batch_norm(%0, %aten::batch_norm_0.weight, %aten::batch_norm_0.bias, %aten::batch_norm_0.running_mean, %aten::batch_norm_0.running_var) /* span=aten::batch_norm_0:0:0 */;
%2 = %1.0 /* span=aten::batch_norm_0:0:0 */;
%3 = nn.relu(%2) /* span=aten::relu__0:0:0 */;
%4 = nn.max_pool2d(%3, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* span=aten::max_pool2d_0:0:0 */;
%5 = nn.conv2d(%4, %aten::_convolution_1.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* span=aten::_convolution_1:0:0 */;
%6 = nn.batch_norm(%5, %aten::batch_norm_1.weight, %aten::batch_norm_1.bias, %aten::batch_norm_1.running_mean, %aten::batch_norm_1.running_var) /* span=aten::batch_norm_1:0:0 */;
%7 = %6.0 /* span=aten::batch_norm_1:0:0 */;
%8 = nn.relu(%7) /* span=aten::relu__1:0:0 */;
%9 = nn.conv2d(%8, %aten::_convolution_2.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* span=aten::_convolution_2:0:0 */;
%10 = nn.batch_norm(%9, %aten::batch_norm_2.weight, %aten::batch_norm_2.bias, %aten::batch_norm_2.running_mean, %aten::batch_norm_2.running_var) /* span=aten::batch_norm_2:0:0 */;
%11 = %10.0 /* span=aten::batch_norm_2:0:0 */;
add(%11, %4) /* span=aten::add__0:0:0 */
}
Running the following code fuses nn.batch_norm away and binds the model parameters as constant expressions (a few other transformations also happen, which we will not go into here):
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        # Pre-quantization preparation
        run_mod = relay.quantize.prerequisite_optimize(mod, origin_params)
run_mod.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%0 = nn.conv2d(%data, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%2 = nn.relu(%1) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */;
%3 = nn.max_pool2d(%2, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
%4 = nn.conv2d(%3, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%5 = add(%4, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%6 = nn.relu(%5) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */;
%7 = nn.conv2d(%6, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%8 = add(%7, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
add(%8, %3) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */
}
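For reference, relay.quantize.prerequisite_optimize essentially binds the weights as constants and then runs a short simplification pipeline. A minimal sketch of what it does, based on the TVM implementation (the exact pass list may vary across versions):
from tvm import relay
import tvm

def prerequisite_optimize_sketch(mod, params=None):
    """Roughly what relay.quantize.prerequisite_optimize does (a sketch)."""
    if params:
        # Bind the weight parameters as constants
        mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params)
    optimize = tvm.transform.Sequential([
        relay.transform.SimplifyInference(),  # decompose nn.batch_norm into multiply/add
        relay.transform.FoldConstant(),
        relay.transform.FoldScaleAxis(),      # fold the BN scale into the conv weights
        relay.transform.CanonicalizeOps(),    # e.g. nn.bias_add -> add
        relay.transform.FoldConstant(),
    ])
    return optimize(mod)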
Define Fusion Rules#
To fuse the conv2d+add+relu structure, define a pattern function:
from tvm.relay.dataflow_pattern import is_op, wildcard

def make_conv_add_relu_pattern():
    """Create the following pattern

        conv2d
          |
        (add)
          |
        (relu)
    """
    x = wildcard()
    w = wildcard()
    bias = wildcard()
    r = is_op("nn.conv2d")(x, w)
    r = is_op("add")(r, bias) | r  # bias is optional
    # activation
    r = is_op("nn.relu")(r) | r  # the activation is also optional
    return r
This pattern can be used to match four structures: conv2d, conv2d+add, conv2d+add+relu, and conv2d+relu.
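As a quick sanity check, DFPattern.match can confirm this. A sketch with toy shapes and variables of my own (not part of the original tutorial):
from tvm import relay

pat = make_conv_add_relu_pattern()
x = relay.var("x", shape=(1, 3, 8, 8))
w = relay.var("w", shape=(8, 3, 3, 3))
b = relay.var("b", shape=(8, 1, 1))
conv = relay.nn.conv2d(x, w, padding=(1, 1))
full = relay.nn.relu(relay.add(conv, b))
assert pat.match(conv)  # conv2d alone
assert pat.match(full)  # conv2d + add + relu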
Run the fusion:
compiler_name = "ccompiler"
pattern_table = [
    (f"{compiler_name}.conv_add_relu", make_conv_add_relu_pattern()),
]
merge_passes = tvm.transform.Sequential([
    relay.transform.InferType(),
    relay.transform.MergeComposite(pattern_table),
    # relay.transform.AnnotateTarget([compiler_name]),
    # relay.transform.PartitionGraph(),
])
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        run_mod_f = merge_passes(run_mod)
print(run_mod_f)
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%5 = fn (%FunctionVar_2_0: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] */, %FunctionVar_2_1: Tensor[(64, 3, 7, 7), float32] /* ty=Tensor[(64, 3, 7, 7), float32] */, %FunctionVar_2_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_nn.relu_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 112, 112), float32] {
%3 = nn.conv2d(%FunctionVar_2_0, %FunctionVar_2_1, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%4 = add(%3, %FunctionVar_2_2) /* ty=Tensor[(1, 64, 112, 112), float32] */;
nn.relu(%4) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */
} /* ty=fn (Tensor[(1, 3, 224, 224), float32], Tensor[(64, 3, 7, 7), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 112, 112), float32] */;
%6 = %5(%data, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%7 = nn.max_pool2d(%6, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
%8 = fn (%FunctionVar_1_0: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_1_1: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */, %FunctionVar_1_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_nn.relu_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 56, 56), float32] {
%1 = nn.conv2d(%FunctionVar_1_0, %FunctionVar_1_1, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%2 = add(%1, %FunctionVar_1_2) /* ty=Tensor[(1, 64, 56, 56), float32] */;
nn.relu(%2) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%9 = %8(%7, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%10 = fn (%FunctionVar_0_0: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_0_1: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */, %FunctionVar_0_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 56, 56), float32] {
%0 = nn.conv2d(%FunctionVar_0_0, %FunctionVar_0_1, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
add(%0, %FunctionVar_0_2) /* ty=Tensor[(1, 64, 56, 56), float32] */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%11 = %10(%9, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
add(%11, %7) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */
}
As you can see, the remaining nn.max_pool2d and the residual add were not fused, so we can add rules for them:
def make_max_pool2d_pattern():
    x = wildcard()
    r = is_op("nn.max_pool2d")(x)
    return r

def make_add_pattern():
    return wildcard() + wildcard()

compiler_name = "ccompiler"
# The patterns are matched in the order listed below
pattern_table = [
    (f"{compiler_name}.conv_add_relu", make_conv_add_relu_pattern()),
    (f"{compiler_name}.max_pool2d", make_max_pool2d_pattern()),
    (f"{compiler_name}.add", make_add_pattern()),
]
merge_passes = tvm.transform.Sequential([
    relay.transform.InferType(),
    relay.transform.MergeComposite(pattern_table),
    # relay.transform.AnnotateTarget([compiler_name]),
    relay.transform.PartitionGraph(),
])
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        run_mod_f = merge_passes(run_mod)
print(run_mod_f)
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%5 = fn (%FunctionVar_2_0: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] */, %FunctionVar_2_1: Tensor[(64, 3, 7, 7), float32] /* ty=Tensor[(64, 3, 7, 7), float32] */, %FunctionVar_2_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_nn.relu_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 112, 112), float32] {
%3 = nn.conv2d(%FunctionVar_2_0, %FunctionVar_2_1, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%4 = add(%3, %FunctionVar_2_2) /* ty=Tensor[(1, 64, 112, 112), float32] */;
nn.relu(%4) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */
} /* ty=fn (Tensor[(1, 3, 224, 224), float32], Tensor[(64, 3, 7, 7), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 112, 112), float32] */;
%6 = %5(%data, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%7 = fn (%FunctionVar_0_02: Tensor[(1, 64, 112, 112), float32] /* ty=Tensor[(1, 64, 112, 112), float32] */, PartitionedFromPattern="nn.max_pool2d_", Composite="ccompiler.max_pool2d") -> Tensor[(1, 64, 56, 56), float32] {
nn.max_pool2d(%FunctionVar_0_02, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */
} /* ty=fn (Tensor[(1, 64, 112, 112), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%8 = %7(%6) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%9 = fn (%FunctionVar_1_0: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_1_1: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */, %FunctionVar_1_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_nn.relu_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 56, 56), float32] {
%1 = nn.conv2d(%FunctionVar_1_0, %FunctionVar_1_1, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%2 = add(%1, %FunctionVar_1_2) /* ty=Tensor[(1, 64, 56, 56), float32] */;
nn.relu(%2) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%10 = %9(%8, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%11 = fn (%FunctionVar_0_01: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_0_11: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */, %FunctionVar_0_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 56, 56), float32] {
%0 = nn.conv2d(%FunctionVar_0_01, %FunctionVar_0_11, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
add(%0, %FunctionVar_0_2) /* ty=Tensor[(1, 64, 56, 56), float32] */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%12 = %11(%10, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%13 = fn (%FunctionVar_0_0: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_0_1: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, PartitionedFromPattern="add_", Composite="ccompiler.add") -> Tensor[(1, 64, 56, 56), float32] {
add(%FunctionVar_0_0, %FunctionVar_0_1) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(1, 64, 56, 56), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%13(%12, %8) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
The result matches the expected structure.
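As an optional sanity check (a small sketch, not part of the original flow), we can list the Composite attributes of the fused functions that MergeComposite produced:
from tvm import relay

composites = []

def _collect(expr):
    # Record every inline function carrying a Composite attribute
    if isinstance(expr, relay.Function) and expr.attrs and "Composite" in expr.attrs:
        composites.append(str(expr.attrs["Composite"]))

relay.analysis.post_order_visit(run_mod_f["main"], _collect)
print(composites)
# expected: three 'ccompiler.conv_add_relu', one 'ccompiler.max_pool2d', one 'ccompiler.add'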
Add the QPartitionExpr Operator to the Fused Functions#
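QPartitionExpr(expr).realize() wraps an expression with the annotation.cast_hint/annotation.stop_fusion pair that marks a quantization-partition boundary. A tiny illustration on a standalone expression (a sketch; the printed form may differ slightly across TVM versions):
from tvm import relay
from tvm.relay.quantize._partition import QPartitionExpr

x = relay.var("x", shape=(1, 8), dtype="float32")
y = relay.nn.relu(x)
# realize() produces roughly:
#   annotation.stop_fusion(annotation.cast_hint(nn.relu(x), dtype="int8"))
print(QPartitionExpr(y).realize())
Below we apply the same wrapping to the body of every fused function: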
from tvm.relay import Call
from tvm.relay.function import Function, FunctionWithFields
from tvm.relay.quantize._partition import QPartitionExpr

@tvm.relay.transform.function_pass(opt_level=1)
class MergeGraphTransform:
    def __init__(self):
        self.reset()

    def reset(self):
        self.nodes = []

    def transform_function(self, func, mod, ctx):
        obj = self

        class Replace(tvm.relay.ExprMutator):
            def visit_function(self, fn):
                new_params = [self.visit(x) for x in fn.params]
                new_body = self.visit(fn.body)
                if not isinstance(new_body.op, Function):  # prevent adding QPartitionExpr repeatedly
                    new_body = QPartitionExpr(new_body).realize()
                if new_params == list(fn.params) and new_body == fn.body:
                    new_fn = fn
                else:
                    new_fn = FunctionWithFields(fn, list(new_params), new_body)
                obj.nodes.append(new_fn)
                return new_fn

        return Replace().visit(func)
transform = MergeGraphTransform()
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        mod_sq = transform(run_mod_f)
mod_sq.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%15 = fn (%FunctionVar_2_0: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] */, %FunctionVar_2_1: Tensor[(64, 3, 7, 7), float32] /* ty=Tensor[(64, 3, 7, 7), float32] */, %FunctionVar_2_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_nn.relu_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 112, 112), float32] {
%11 = nn.conv2d(%FunctionVar_2_0, %FunctionVar_2_1, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%12 = add(%11, %FunctionVar_2_2) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%13 = nn.relu(%12) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */;
%14 = annotation.cast_hint(%13, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), float32] */;
annotation.stop_fusion(%14) /* ty=Tensor[(1, 64, 112, 112), float32] */
} /* ty=fn (Tensor[(1, 3, 224, 224), float32], Tensor[(64, 3, 7, 7), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 112, 112), float32] */;
%16 = %15(%data, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%17 = fn (%FunctionVar_0_02: Tensor[(1, 64, 112, 112), float32] /* ty=Tensor[(1, 64, 112, 112), float32] */, PartitionedFromPattern="nn.max_pool2d_", Composite="ccompiler.max_pool2d") -> Tensor[(1, 64, 56, 56), float32] {
%9 = nn.max_pool2d(%FunctionVar_0_02, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
%10 = annotation.cast_hint(%9, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%10) /* ty=Tensor[(1, 64, 56, 56), float32] */
} /* ty=fn (Tensor[(1, 64, 112, 112), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%18 = %17(%16) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%19 = fn (%FunctionVar_1_0: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_1_1: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */, %FunctionVar_1_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_nn.relu_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 56, 56), float32] {
%5 = nn.conv2d(%FunctionVar_1_0, %FunctionVar_1_1, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%6 = add(%5, %FunctionVar_1_2) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%7 = nn.relu(%6) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */;
%8 = annotation.cast_hint(%7, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%8) /* ty=Tensor[(1, 64, 56, 56), float32] */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%20 = %19(%18, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%21 = fn (%FunctionVar_0_01: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_0_11: Tensor[(64, 64, 3, 3), float32] /* ty=Tensor[(64, 64, 3, 3), float32] */, %FunctionVar_0_2: Tensor[(64, 1, 1), float32] /* ty=Tensor[(64, 1, 1), float32] */, PartitionedFromPattern="nn.conv2d_add_", Composite="ccompiler.conv_add_relu") -> Tensor[(1, 64, 56, 56), float32] {
%2 = nn.conv2d(%FunctionVar_0_01, %FunctionVar_0_11, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%3 = add(%2, %FunctionVar_0_2) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%4 = annotation.cast_hint(%3, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%4) /* ty=Tensor[(1, 64, 56, 56), float32] */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(64, 64, 3, 3), float32], Tensor[(64, 1, 1), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%22 = %21(%20, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%23 = fn (%FunctionVar_0_0: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, %FunctionVar_0_1: Tensor[(1, 64, 56, 56), float32] /* ty=Tensor[(1, 64, 56, 56), float32] */, PartitionedFromPattern="add_", Composite="ccompiler.add") -> Tensor[(1, 64, 56, 56), float32] {
%0 = add(%FunctionVar_0_0, %FunctionVar_0_1) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */;
%1 = annotation.cast_hint(%0, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%1) /* ty=Tensor[(1, 64, 56, 56), float32] */
} /* ty=fn (Tensor[(1, 64, 56, 56), float32], Tensor[(1, 64, 56, 56), float32]) -> Tensor[(1, 64, 56, 56), float32] */;
%23(%22, %18) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
Eliminate Function Expressions from the Graph#
Since tvm.contrib.graph_executor.GraphModule cannot run inference over tvm.relay.function.Function nodes, they need to be decomposed back into primitive functions so that the subsequent calibration step can run:
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        run_mod_sq = relay.transform.DefuseOps()(mod_sq)
run_mod_sq.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%0 = nn.conv2d(%data, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%1 = add(%0, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%2 = nn.relu(%1) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */;
%3 = annotation.cast_hint(%2, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), float32] */;
%4 = annotation.stop_fusion(%3) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%5 = nn.max_pool2d(%4, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
%6 = annotation.cast_hint(%5, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%7 = annotation.stop_fusion(%6) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%8 = nn.conv2d(%7, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%9 = add(%8, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%10 = nn.relu(%9) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */;
%11 = annotation.cast_hint(%10, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%12 = annotation.stop_fusion(%11) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%13 = nn.conv2d(%12, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%14 = add(%13, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%15 = annotation.cast_hint(%14, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%16 = annotation.stop_fusion(%15) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%17 = add(%16, %7) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */;
%18 = annotation.cast_hint(%17, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%18) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
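To confirm that the defused module really can be executed by the graph executor (which the KL-divergence calibration relies on), here is a quick sketch, assuming an llvm target and a random input (not part of the original flow):
from tvm.contrib import graph_executor

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(run_mod_sq, target="llvm")
m = graph_executor.GraphModule(lib["default"](tvm.cpu()))
m.set_input("data", np.random.normal(size=input_shape).astype("float32"))
m.run()
print(m.get_output(0).shape)  # expect (1, 64, 56, 56)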
Annotate the Graph#
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        annotate_mod = relay.quantize.annotate()(run_mod_sq)
annotate_mod.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */, %dom_scale: float32 /* ty=float32 */, %clip_min: float32 /* ty=float32 */, %clip_max: float32 /* ty=float32 */, %dom_scale1: float32 /* ty=float32 */, %clip_min1: float32 /* ty=float32 */, %clip_max1: float32 /* ty=float32 */, %dom_scale2: float32 /* ty=float32 */, %clip_min2: float32 /* ty=float32 */, %clip_max2: float32 /* ty=float32 */, %dom_scale3: float32 /* ty=float32 */, %clip_min3: float32 /* ty=float32 */, %clip_max3: float32 /* ty=float32 */, %dom_scale4: float32 /* ty=float32 */, %clip_min4: float32 /* ty=float32 */, %clip_max4: float32 /* ty=float32 */, %dom_scale5: float32 /* ty=float32 */, %clip_min5: float32 /* ty=float32 */, %clip_max5: float32 /* ty=float32 */, %dom_scale6: float32 /* ty=float32 */, %clip_min6: float32 /* ty=float32 */, %clip_max6: float32 /* ty=float32 */, %dom_scale7: float32 /* ty=float32 */, %clip_min7: float32 /* ty=float32 */, %clip_max7: float32 /* ty=float32 */, %dom_scale8: float32 /* ty=float32 */, %clip_min8: float32 /* ty=float32 */, %clip_max8: float32 /* ty=float32 */, %dom_scale9: float32 /* ty=float32 */, %clip_min9: float32 /* ty=float32 */, %clip_max9: float32 /* ty=float32 */, %dom_scale10: float32 /* ty=float32 */, %clip_min10: float32 /* ty=float32 */, %clip_max10: float32 /* ty=float32 */, %dom_scale11: float32 /* ty=float32 */, %clip_min11: float32 /* ty=float32 */, %clip_max11: float32 /* ty=float32 */) -> Tensor[(1, 64, 56, 56), float32] {
%0 = relay.op.annotation.simulated_quantize(%data, %dom_scale, %clip_min, %clip_max, kind=1) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%1 = relay.op.annotation.simulated_quantize(meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, %dom_scale1, %clip_min1, %clip_max1, kind=2) /* ty=Tensor[(64, 3, 7, 7), float32] */;
%2 = nn.conv2d(%0, %1, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%3 = relay.op.annotation.simulated_quantize(meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */, %dom_scale2, %clip_min2, %clip_max2, kind=2) /* ty=Tensor[(64, 1, 1), float32] */;
%4 = add(%2, %3) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%5 = nn.relu(%4) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */;
%6 = relay.op.annotation.simulated_quantize(%5, %dom_scale3, %clip_min3, %clip_max3, kind=1) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%7 = annotation.cast_hint(%6, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), float32] */;
%8 = annotation.stop_fusion(%7) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%9 = nn.max_pool2d(%8, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
%10 = annotation.cast_hint(%9, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%11 = annotation.stop_fusion(%10) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%12 = relay.op.annotation.simulated_quantize(%11, %dom_scale4, %clip_min4, %clip_max4, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%13 = relay.op.annotation.simulated_quantize(meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, %dom_scale5, %clip_min5, %clip_max5, kind=2) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%14 = nn.conv2d(%12, %13, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%15 = relay.op.annotation.simulated_quantize(meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */, %dom_scale6, %clip_min6, %clip_max6, kind=2) /* ty=Tensor[(64, 1, 1), float32] */;
%16 = add(%14, %15) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%17 = nn.relu(%16) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */;
%18 = relay.op.annotation.simulated_quantize(%17, %dom_scale7, %clip_min7, %clip_max7, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%19 = annotation.cast_hint(%18, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%20 = annotation.stop_fusion(%19) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%21 = relay.op.annotation.simulated_quantize(meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, %dom_scale8, %clip_min8, %clip_max8, kind=2) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%22 = nn.conv2d(%20, %21, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%23 = relay.op.annotation.simulated_quantize(meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */, %dom_scale9, %clip_min9, %clip_max9, kind=2) /* ty=Tensor[(64, 1, 1), float32] */;
%24 = add(%22, %23) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%25 = relay.op.annotation.simulated_quantize(%24, %dom_scale10, %clip_min10, %clip_max10, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%26 = annotation.cast_hint(%25, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%27 = annotation.stop_fusion(%26) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%28 = add(%27, %12) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */;
%29 = relay.op.annotation.simulated_quantize(%28, %dom_scale11, %clip_min11, %clip_max11, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%30 = annotation.cast_hint(%29, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%30) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
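In the printout above, kind=1 marks input/activation tensors and kind=2 marks weights; the mapping comes from QAnnotateKind:
from tvm.relay.quantize.quantize import QAnnotateKind

# IDENTITY=0, INPUT=1, WEIGHT=2, ACTIVATION=3
print(QAnnotateKind.INPUT, QAnnotateKind.WEIGHT, QAnnotateKind.ACTIVATION)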
Simulated Quantization#
from tvm.relay.quantize import calibrate

# Define the calibration dataset
def data_iter(input_name, input_shape, num=1):
    for _ in range(num):
        yield {input_name: np.random.normal(size=input_shape)}

dataset = data_iter(input_name, input_shape)
calibrate_pass = tvm.transform.module_pass(
    calibrate(dataset), opt_level=1, name="QuantizeCalibrate"
)
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        calibrate_mod = calibrate_pass(annotate_mod)
calibrate_mod.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%0 = relay.op.annotation.simulated_quantize(%data, 0.0321894f, -127f, 127f, kind=1) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%1 = relay.op.annotation.simulated_quantize(meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, 0.00307715f, -127f, 127f, kind=2) /* ty=Tensor[(64, 3, 7, 7), float32] */;
%2 = nn.conv2d(%0, %1, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%3 = relay.op.annotation.simulated_quantize(meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */, 0.00541632f, -127f, 127f, kind=2) /* ty=Tensor[(64, 1, 1), float32] */;
%4 = add(%2, %3) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%5 = nn.relu(%4) /* ty=Tensor[(1, 64, 112, 112), float32] span=aten::relu__0:0:0 */;
%6 = relay.op.annotation.simulated_quantize(%5, 0.0271459f, -127f, 127f, kind=1) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%7 = annotation.cast_hint(%6, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), float32] */;
%8 = annotation.stop_fusion(%7) /* ty=Tensor[(1, 64, 112, 112), float32] */;
%9 = nn.max_pool2d(%8, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::max_pool2d_0:0:0 */;
%10 = annotation.cast_hint(%9, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%11 = annotation.stop_fusion(%10) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%12 = relay.op.annotation.simulated_quantize(%11, 0.0293179f, -127f, 127f, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%13 = relay.op.annotation.simulated_quantize(meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, 0.00292581f, -127f, 127f, kind=2) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%14 = nn.conv2d(%12, %13, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%15 = relay.op.annotation.simulated_quantize(meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */, 0.00861102f, -127f, 127f, kind=2) /* ty=Tensor[(64, 1, 1), float32] */;
%16 = add(%14, %15) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%17 = nn.relu(%16) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::relu__1:0:0 */;
%18 = relay.op.annotation.simulated_quantize(%17, 0.0113234f, -127f, 127f, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%19 = annotation.cast_hint(%18, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%20 = annotation.stop_fusion(%19) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%21 = relay.op.annotation.simulated_quantize(meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, 0.00602173f, -127f, 127f, kind=2) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%22 = nn.conv2d(%20, %21, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%23 = relay.op.annotation.simulated_quantize(meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */, 0.0139479f, -127f, 127f, kind=2) /* ty=Tensor[(64, 1, 1), float32] */;
%24 = add(%22, %23) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%25 = relay.op.annotation.simulated_quantize(%24, 0.016983f, -127f, 127f, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%26 = annotation.cast_hint(%25, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
%27 = annotation.stop_fusion(%26) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%28 = add(%27, %12) /* ty=Tensor[(1, 64, 56, 56), float32] span=aten::add__0:0:0 */;
%29 = relay.op.annotation.simulated_quantize(%28, 0.0319001f, -127f, 127f, kind=1) /* ty=Tensor[(1, 64, 56, 56), float32] */;
%30 = annotation.cast_hint(%29, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), float32] */;
annotation.stop_fusion(%30) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
Quantization Realization#
with tvm.transform.PassContext(opt_level=3):
    with relay.quantize.qconfig(
        calibrate_mode="kl_divergence",
        weight_scale="max",
        skip_conv_layers=[],
        skip_dense_layer=False
    ):
        run_mod_r = relay.quantize.realize()(calibrate_mod)
run_mod_r.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%0 = multiply(%data, 31.0661f /* ty=float32 */) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%1 = round(%0) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%2 = clip(%1, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%3 = multiply(meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), float32] */, 324.976f /* ty=float32 */) /* ty=Tensor[(64, 3, 7, 7), float32] */;
%4 = round(%3) /* ty=Tensor[(64, 3, 7, 7), float32] */;
%5 = clip(%4, a_min=-127f, a_max=127f) /* ty=Tensor[(64, 3, 7, 7), float32] */;
%6 = cast(%2, dtype="int8") /* ty=Tensor[(1, 3, 224, 224), int8] */;
%7 = cast(%5, dtype="int8") /* ty=Tensor[(64, 3, 7, 7), int8] */;
%8 = multiply(meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), float32] */, 184.627f /* ty=float32 */) /* ty=Tensor[(64, 1, 1), float32] */;
%9 = round(%8) /* ty=Tensor[(64, 1, 1), float32] */;
%10 = clip(%9, a_min=-127f, a_max=127f) /* ty=Tensor[(64, 1, 1), float32] */;
%11 = cast(%10, dtype="int32") /* ty=Tensor[(64, 1, 1), int32] */;
%12 = fixed_point_multiply(%11, multiplier=1834815744, shift=6) /* ty=Tensor[(64, 1, 1), int32] */;
%13 = cast(%12, dtype="int32") /* ty=Tensor[(64, 1, 1), int32] */;
%14 = nn.conv2d(%6, %7, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7], out_dtype="int32") /* ty=Tensor[(1, 64, 112, 112), int32] */;
%15 = annotation.stop_fusion(%13) /* ty=Tensor[(64, 1, 1), int32] */;
%16 = add(%14, %15) /* ty=Tensor[(1, 64, 112, 112), int32] */;
%17 = nn.relu(%16) /* ty=Tensor[(1, 64, 112, 112), int32] */;
%18 = cast(%17, dtype="int64") /* ty=Tensor[(1, 64, 112, 112), int64] */;
%19 = fixed_point_multiply(%18, multiplier=2005982080, shift=-8) /* ty=Tensor[(1, 64, 112, 112), int64] */;
%20 = clip(%19, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 112, 112), int64] */;
%21 = cast(%20, dtype="int32") /* ty=Tensor[(1, 64, 112, 112), int32] */;
%22 = cast(%21, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), int8] */;
%23 = annotation.stop_fusion(%22) /* ty=Tensor[(1, 64, 112, 112), int8] */;
%24 = cast(%23, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), int8] */;
%25 = nn.max_pool2d(%24, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%26 = cast(%25, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%27 = annotation.stop_fusion(%26) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%28 = cast(%27, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%29 = fixed_point_multiply(%28, multiplier=1988388480, shift=0) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%30 = clip(%29, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%31 = multiply(meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), float32] */, 341.785f /* ty=float32 */) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%32 = round(%31) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%33 = clip(%32, a_min=-127f, a_max=127f) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%34 = cast(%30, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%35 = cast(%33, dtype="int8") /* ty=Tensor[(64, 64, 3, 3), int8] */;
%36 = multiply(meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), float32] */, 116.13f /* ty=float32 */) /* ty=Tensor[(64, 1, 1), float32] */;
%37 = round(%36) /* ty=Tensor[(64, 1, 1), float32] */;
%38 = clip(%37, a_min=-127f, a_max=127f) /* ty=Tensor[(64, 1, 1), float32] */;
%39 = cast(%38, dtype="int32") /* ty=Tensor[(64, 1, 1), int32] */;
%40 = fixed_point_multiply(%39, multiplier=1684203520, shift=7) /* ty=Tensor[(64, 1, 1), int32] */;
%41 = cast(%40, dtype="int32") /* ty=Tensor[(64, 1, 1), int32] */;
%42 = nn.conv2d(%34, %35, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], out_dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%43 = annotation.stop_fusion(%41) /* ty=Tensor[(64, 1, 1), int32] */;
%44 = add(%42, %43) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%45 = nn.relu(%44) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%46 = cast(%45, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%47 = fixed_point_multiply(%46, multiplier=2082302592, shift=-7) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%48 = clip(%47, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%49 = cast(%48, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%50 = cast(%49, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%51 = multiply(meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), float32] */, 166.065f /* ty=float32 */) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%52 = round(%51) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%53 = clip(%52, a_min=-127f, a_max=127f) /* ty=Tensor[(64, 64, 3, 3), float32] */;
%54 = annotation.stop_fusion(%50) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%55 = cast(%53, dtype="int8") /* ty=Tensor[(64, 64, 3, 3), int8] */;
%56 = multiply(meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), float32] */, 71.6953f /* ty=float32 */) /* ty=Tensor[(64, 1, 1), float32] */;
%57 = round(%56) /* ty=Tensor[(64, 1, 1), float32] */;
%58 = clip(%57, a_min=-127f, a_max=127f) /* ty=Tensor[(64, 1, 1), float32] */;
%59 = cast(%58, dtype="int32") /* ty=Tensor[(64, 1, 1), int32] */;
%60 = fixed_point_multiply(%59, multiplier=1715941248, shift=8) /* ty=Tensor[(64, 1, 1), int32] */;
%61 = cast(%60, dtype="int32") /* ty=Tensor[(64, 1, 1), int32] */;
%62 = nn.conv2d(%54, %55, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], out_dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%63 = annotation.stop_fusion(%61) /* ty=Tensor[(64, 1, 1), int32] */;
%64 = add(%62, %63) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%65 = cast(%64, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%66 = fixed_point_multiply(%65, multiplier=1103626752, shift=-7) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%67 = clip(%66, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%68 = cast(%67, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%69 = cast(%68, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%70 = annotation.stop_fusion(%69) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%71 = cast(%34, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%72 = fixed_point_multiply(%71, multiplier=1853610368, shift=1) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%73 = cast(%70, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%74 = cast(%72, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%75 = add(%73, %74) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%76 = cast(%75, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%77 = fixed_point_multiply(%76, multiplier=1143279232, shift=0) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%78 = clip(%77, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%79 = cast(%78, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%80 = cast(%79, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%81 = annotation.stop_fusion(%80) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%82 = cast(%81, dtype="float32") /* ty=Tensor[(1, 64, 56, 56), float32] */;
multiply(%82, 0.0319001f /* ty=float32 */) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
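Note how the calibrated scales reappear in the realized graph: the input is quantized by multiplying with the reciprocal of its dom_scale (the 31.0661f constant in %0), and the final output is dequantized back to float32 by multiplying with the output dom_scale 0.0319001. A one-line check of the arithmetic:
# 0.0321894 is the calibrated dom_scale of %data; its reciprocal is the
# quantization multiplier used in the realized graph.
print(1 / 0.0321894)  # ~= 31.0661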
Fold constants:
with tvm.transform.PassContext(opt_level=3):
    run_mod_r = relay.transform.FoldConstant()(run_mod_r)
    run_mod_r = relay.transform.SimplifyInference()(run_mod_r)
run_mod_r.show()
def @main(%data: Tensor[(1, 3, 224, 224), float32] /* ty=Tensor[(1, 3, 224, 224), float32] span=aten::_convolution_0.data:0:0 */) -> Tensor[(1, 64, 56, 56), float32] {
%0 = multiply(%data, 31.0661f /* ty=float32 */) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%1 = round(%0) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%2 = clip(%1, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 3, 224, 224), float32] */;
%3 = cast(%2, dtype="int8") /* ty=Tensor[(1, 3, 224, 224), int8] */;
%4 = nn.conv2d(%3, meta[relay.Constant][0] /* ty=Tensor[(64, 3, 7, 7), int8] */, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7], out_dtype="int32") /* ty=Tensor[(1, 64, 112, 112), int32] */;
%5 = add(%4, meta[relay.Constant][1] /* ty=Tensor[(64, 1, 1), int32] */) /* ty=Tensor[(1, 64, 112, 112), int32] */;
%6 = nn.relu(%5) /* ty=Tensor[(1, 64, 112, 112), int32] */;
%7 = cast(%6, dtype="int64") /* ty=Tensor[(1, 64, 112, 112), int64] */;
%8 = fixed_point_multiply(%7, multiplier=2005982080, shift=-8) /* ty=Tensor[(1, 64, 112, 112), int64] */;
%9 = clip(%8, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 112, 112), int64] */;
%10 = cast(%9, dtype="int32") /* ty=Tensor[(1, 64, 112, 112), int32] */;
%11 = cast(%10, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), int8] */;
%12 = annotation.stop_fusion(%11) /* ty=Tensor[(1, 64, 112, 112), int8] */;
%13 = cast(%12, dtype="int8") /* ty=Tensor[(1, 64, 112, 112), int8] */;
%14 = nn.max_pool2d(%13, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%15 = cast(%14, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%16 = annotation.stop_fusion(%15) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%17 = cast(%16, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%18 = fixed_point_multiply(%17, multiplier=1988388480, shift=0) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%19 = clip(%18, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%20 = cast(%19, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%21 = nn.conv2d(%20, meta[relay.Constant][2] /* ty=Tensor[(64, 64, 3, 3), int8] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], out_dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%22 = add(%21, meta[relay.Constant][3] /* ty=Tensor[(64, 1, 1), int32] */) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%23 = nn.relu(%22) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%24 = cast(%23, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%25 = fixed_point_multiply(%24, multiplier=2082302592, shift=-7) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%26 = clip(%25, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%27 = cast(%26, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%28 = cast(%27, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%29 = annotation.stop_fusion(%28) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%30 = nn.conv2d(%29, meta[relay.Constant][4] /* ty=Tensor[(64, 64, 3, 3), int8] */, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], out_dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%31 = add(%30, meta[relay.Constant][5] /* ty=Tensor[(64, 1, 1), int32] */) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%32 = cast(%31, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%33 = fixed_point_multiply(%32, multiplier=1103626752, shift=-7) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%34 = clip(%33, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%35 = cast(%34, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%36 = cast(%35, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%37 = annotation.stop_fusion(%36) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%38 = cast(%20, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%39 = fixed_point_multiply(%38, multiplier=1853610368, shift=1) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%40 = cast(%37, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%41 = cast(%39, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%42 = add(%40, %41) /* ty=Tensor[(1, 64, 56, 56), int32] */;
%43 = cast(%42, dtype="int64") /* ty=Tensor[(1, 64, 56, 56), int64] */;
%44 = fixed_point_multiply(%43, multiplier=1143279232, shift=0) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%45 = clip(%44, a_min=-127f, a_max=127f) /* ty=Tensor[(1, 64, 56, 56), int64] */;
%46 = cast(%45, dtype="int32") /* ty=Tensor[(1, 64, 56, 56), int32] */;
%47 = cast(%46, dtype="int8") /* ty=Tensor[(1, 64, 56, 56), int8] */;
%48 = annotation.stop_fusion(%47) /* ty=Tensor[(1, 64, 56, 56), int8] */;
%49 = cast(%48, dtype="float32") /* ty=Tensor[(1, 64, 56, 56), float32] */;
multiply(%49, 0.0319001f /* ty=float32 */) /* ty=Tensor[(1, 64, 56, 56), float32] */
}
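Finally, a closing sketch (assuming an llvm target, a random input, and a helper of my own that is not part of the original flow) that builds both the prerequisite-optimized float module and the realized int8 module and compares their outputs:
import numpy as np
from tvm.contrib import graph_executor

def run_graph(mod, data, target="llvm"):
    """Build a Relay module and run it once on CPU."""
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target)
    m = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    m.set_input("data", data)
    m.run()
    return m.get_output(0).numpy()

data = np.random.normal(size=input_shape).astype("float32")
float_out = run_graph(run_mod, data)    # float subgraph after prerequisite_optimize
quant_out = run_graph(run_mod_r, data)  # realized int8 subgraph
print(np.abs(float_out - quant_out).mean())  # mean absolute quantization error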