%%shell
# Jupyter/Colab cell magic: run this cell's body in a shell.
# Installs the latest dev build of TVM from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm --pre
4. microTVM PyTorch 教程#
Authors: Mehrdad Hessar
本教程展示了使用 PyTorch 模型的 MicroTVM 主机驱动 AoT 编译。此教程可以在使用 C 运行时(CRT)的 x86 CPU 上执行。
备注
此教程仅在使用 CRT 的 x86 CPU 上运行,无法在 Zephyr 上运行,因为该模型不适合我们当前支持的 Zephyr 开发板。
安装 microTVM Python 依赖#
TVM 不包括 Python 串行通信包,因此在使用 microTVM 之前我们必须安装一个。我们还需要 TFLite 来加载模型。
%%shell
# microTVM needs a serial-communication package (pyserial); tflite is used to load models.
pip install pyserial==3.5 tflite==2.1
import pathlib
import torch
import torchvision
from torchvision import transforms
import numpy as np
from PIL import Image
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.backend import Executor
import tvm.micro.testing
加载预训练 PyTorch 模型#
首先,从 torchvision 中加载预训练的 MobileNetV2。接下来,下载一张猫的图片并对其进行预处理以用作模型输入。
# Load a quantized MobileNetV2 from torchvision and put it in inference mode.
model = torchvision.models.quantization.mobilenet_v2(weights="DEFAULT", quantize=True)
model = model.eval()
# TorchScript-trace the model with a dummy NCHW input so Relay can import it.
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()
# Download a test image of a cat and open it.
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor.
# NOTE(review): the image is already resized to 224x224 above, so the
# Resize(256) + CenterCrop(224) below re-scales it a second time — harmless
# for the demo but slightly lossy; consider dropping the initial resize.
my_preprocess = transforms.Compose(
    [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)
img = my_preprocess(img)
img = np.expand_dims(img, 0)  # add batch dimension -> (1, 3, 224, 224)
# Import the traced model into Relay, binding the input name to its shape.
input_name = "input0"
shape_list = [(input_name, input_shape)]
relay_mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
定义 Target,Runtime 和 Executor#
在本教程中,使用 AOT 主机驱动执行器。为了将模型编译为在 x86 机器上模拟的嵌入式环境,使用 C 运行时(CRT),并使用 host
微目标。使用这种设置,TVM 编译了用于 C 运行时的模型,该模型可以在与物理微控制器相同的流程下在 x86 CPU 机器上运行。CRT 使用 src/runtime/crt/host/main.cc
中的 main() 函数。要使用物理硬件,请将 board 替换为另一个物理微目标(例如 nrf5340dk_nrf5340_cpuapp 或 mps2_an521),并将平台类型更改为 Zephyr。在 Training Vision Models for microTVM on Arduino 和 microTVM TFLite Tutorial 中可以找到更多目标示例。
# Host-simulated micro target: the C runtime (CRT) running on the x86 build machine.
target = tvm.micro.testing.get_target(platform="crt", board=None)
# Use the C runtime (crt) and enable static linking by setting system-lib to True.
runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True})
# Use the AOT executor rather than graph or vm executors. Don't use unpacked API or C calling style.
executor = Executor("aot")
编译模型#
现在,将模型编译为目标平台:
# Build the Relay module for the micro target. tir.disable_vectorize is set
# because the generated C code for the CRT cannot rely on SIMD vectorization.
with tvm.transform.PassContext(
    opt_level=3,
    config={"tir.disable_vectorize": True},
):
    module = tvm.relay.build(
        relay_mod, target=target, runtime=runtime, executor=executor, params=params
    )
创建 microTVM project#
既然将编译后的模型作为 IRModule,需要创建固件(firmware)项目,以便使用 microTVM 来使用编译后的模型。为此,使用 Project API。
# Generate a standalone firmware project from the CRT template, embedding the
# compiled module via the Project API.
template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
# workspace_size_bytes: 6 MiB — presumably the runtime memory pool for
# intermediate tensors; confirm against the template project's options.
project_options = {"verbose": False, "workspace_size_bytes": 6 * 1024 * 1024}
temp_dir = tvm.contrib.utils.tempdir() / "project"
project = tvm.micro.generate_project(
    str(template_project_path),
    module,
    temp_dir,
    project_options,
)
---------------------------------------------------------------------------
MicroTVMTemplateProjectNotFoundError Traceback (most recent call last)
Cell In[6], line 1
----> 1 template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
2 project_options = {"verbose": False, "workspace_size_bytes": 6 * 1024 * 1024}
4 temp_dir = tvm.contrib.utils.tempdir() / "project"
File /media/pc/data/lxw/ai/tvm/xinetzone/__pypackages__/3.10/lib/tvm/micro/build.py:106, in get_microtvm_template_projects(platform)
104 break
105 else:
--> 106 raise MicroTVMTemplateProjectNotFoundError()
108 return os.path.join(microtvm_template_projects, platform)
MicroTVMTemplateProjectNotFoundError:
构建,烧录并执行模型#
接下来,构建 microTVM 项目并将其烧录(flash)。如果是通过主机的 main.cc
模拟微控制器(microcontroller),或者选择了 Zephyr 模拟板作为目标,则跳过烧录步骤,因为烧录步骤是针对物理微控制器的。
# Build the generated firmware project; flashing only matters for physical
# boards (see the note above in the tutorial text) — for the host-simulated
# CRT it is effectively skipped.
project.build()
project.flash()
# Open a session to the (simulated) device and run one inference through the
# AOT executor, feeding the preprocessed image as float32.
input_data = {input_name: tvm.nd.array(img.astype("float32"))}
with tvm.micro.Session(project.transport()) as session:
    aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
    aot_executor.set_input(**input_data)
    aot_executor.run()
    result = aot_executor.get_output(0).numpy()
查询 synset 名称#
查询在 1000 个类别的同义词集中预测的 top 1 索引。
synset_url = (
    "https://raw.githubusercontent.com/Cadene/"
    "pretrained-models.pytorch/master/data/"
    "imagenet_synsets.txt"
)
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
# Each line is "<wnid> <human-readable class name>"; build a wnid -> name map.
with open(synset_path) as synset_file:
    entries = [raw.strip() for raw in synset_file]
key_to_classname = {}
for entry in entries:
    wnid, _, classname = entry.partition(" ")
    key_to_classname[wnid] = classname
class_url = (
    "https://raw.githubusercontent.com/Cadene/"
    "pretrained-models.pytorch/master/data/"
    "imagenet_classes.txt"
)
class_path = download_testdata(class_url, "imagenet_classes.txt", module="data")
# One synset key per line, ordered by ImageNet class id.
with open(class_path) as class_file:
    class_id_to_key = [line.strip() for line in class_file]
# Get top-1 result for TVM
top1_tvm = np.argmax(result)
tvm_class_key = class_id_to_key[top1_tvm]
# Run the quantized PyTorch model directly on the same image as a reference.
with torch.no_grad():
    reference_output = model(torch.from_numpy(img))
# Get top-1 result for PyTorch
top1_torch = np.argmax(reference_output.numpy())
torch_class_key = class_id_to_key[top1_torch]
# Report both predictions; they should agree if compilation preserved the model.
print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))