TensorFlow Frontend#

Reference:

Note

Please limit TensorFlow's GPU memory usage to what is actually needed rather than letting it claim all available memory. See limiting_gpu_memory_growth for how to do this.
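
The cell below enables memory growth. As an alternative, you can cap the amount of GPU memory TensorFlow is allowed to allocate; a minimal sketch (the 1024 MB limit is an arbitrary example, and it must run before any GPU has been initialized):

import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")
if gpus:
    # Limit TensorFlow to a fixed slice of the first GPU's memory (1024 MB is only an example).
    tf.config.set_logical_device_configuration(
        gpus[0], [tf.config.LogicalDeviceConfiguration(memory_limit=1024)]
    )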

import numpy as np
import tensorflow as tf
try:
    tf_compat_v1 = tf.compat.v1
except (ImportError, AttributeError):
    tf_compat_v1 = tf

gpus = tf.config.list_physical_devices("GPU")
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print("tensorflow will use experimental.set_memory_growth(True)")
    except RuntimeError as e:
        print("experimental.set_memory_growth option is not available: {}".format(e))
# TensorFlow utility functions
import tvm.relay.testing.tf as tf_testing
2023-06-09 14:12:16.956157: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
2023-06-09 14:12:17.005059: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
2023-06-09 14:12:17.006107: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-06-09 14:12:17.794152: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
2023-06-09 14:12:19.241479: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1956] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.
Skipping registering GPU devices...
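
The logs above show that this particular run fell back to the CPU. If you want to record which TensorFlow and TVM builds produced the outputs shown in this tutorial, a quick version check:

import tensorflow as tf
import tvm

print("TensorFlow version:", tf.__version__)
print("TVM version:", tvm.__version__)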

Preparation#

# Base location for model related files.
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1"
# Test image
img_name = "elephant-299.jpg"
image_url = f"{repo_base}/{img_name}"
# Model
model_name = "classify_image_graph_def-with_shapes.pb"
model_url = f"{repo_base}/{model_name}"
# Image label map
map_proto = "imagenet_2012_challenge_label_map_proto.pbtxt"
map_proto_url = f"{repo_base}/{map_proto}"
# Human-readable text for the labels.
label_map = "imagenet_synset_to_human_label_map.txt"
label_map_url = f"{repo_base}/{label_map}"

Download the following files:

from tvm.contrib.download import download_testdata

image_path = download_testdata(image_url, img_name, module="data")
model_path = download_testdata(model_url, model_name, module=["tf", "InceptionV1"])
map_proto_path = download_testdata(map_proto_url, map_proto, module="data")
label_path = download_testdata(label_map_url, label_map, module="data")
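
download_testdata downloads each file once, caches it locally (typically under ~/.tvm_test_data), and returns the absolute path of the cached copy. A quick check that the downloads succeeded (PIL is also used later in this tutorial):

from PIL import Image

# Paths returned by download_testdata point into the local cache.
print(model_path)
# Open the test image to confirm it decodes correctly.
img = Image.open(image_path)
print(img.size, img.mode)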

Inference on TensorFlow#

Run the model on TensorFlow first to obtain a reference result. Start by defining a few helper functions:

def create_graph(model_path):
    """Create a graph from a saved GraphDef file."""
    # Create the graph from the saved graph_def.pb file.
    with tf_compat_v1.gfile.GFile(model_path, "rb") as f:
        graph_def = tf_compat_v1.GraphDef()
        graph_def.ParseFromString(f.read())
        # Import the graph held in graph_def into the current default graph.
        tf.import_graph_def(graph_def, name="")
        # Type-check `graph_def` and canonicalize it if necessary.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
    return graph_def

def read_image_tf(image_path):
    if not tf_compat_v1.gfile.Exists(image_path):
        tf_compat_v1.logging.fatal("File does not exist %s", image_path)
    with tf_compat_v1.gfile.GFile(image_path, "rb") as img_f:
        image_data = img_f.read()
    return image_data

def top_k(predictions, map_proto_path, label_path, k=5):
    # Create a node ID --> English string lookup.
    node_lookup = tf_testing.NodeLookup(
        label_lookup_path=map_proto_path, uid_lookup_path=label_path
    )
    # Collect the top-k predictions.
    top_k = predictions.argsort()[-k:][::-1]
    return {
        node_lookup.id_to_string(node_id): predictions[node_id]
        for node_id in top_k
    }

Run TensorFlow inference:

with tf_compat_v1.Session() as sess:
    # Create the graph from the saved GraphDef file.
    graph_def = create_graph(model_path)
    # Add shape attributes to the nodes of the graph.
    graph_def = tf_testing.AddShapesToGraphDef(sess, "softmax")
    softmax_tensor = sess.graph.get_tensor_by_name("softmax:0")
    image_data = read_image_tf(image_path)
    predictions = sess.run(softmax_tensor, {"DecodeJpeg/contents:0": image_data})
WARNING:tensorflow:From /media/pc/data/lxw/ai/tvm/xinetzone/__pypackages__/3.10/lib/tvm/relay/testing/tf.py:136: convert_variables_to_constants (from tensorflow.python.framework.convert_to_constants) is deprecated and will be removed in a future version.
Instructions for updating:
This API was designed for TensorFlow v1. See https://www.tensorflow.org/guide/migrate for instructions on how to migrate your code to TensorFlow v2.
WARNING:tensorflow:From /media/pc/data/tmp/cache/conda/envs/tvmz/lib/python3.10/site-packages/tensorflow/python/framework/convert_to_constants.py:952: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This API was designed for TensorFlow v1. See https://www.tensorflow.org/guide/migrate for instructions on how to migrate your code to TensorFlow v2.
2023-06-09 14:12:21.225269: W tensorflow/core/framework/op_def_util.cc:369] Op BatchNormWithGlobalNormalization is deprecated. It will cease to work in GraphDef version 9. Use tf.nn.batch_normalization().
2023-06-09 14:12:21.871263: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:353] MLIR V1 optimization pass is not enabled

Display the results:

predictions = np.squeeze(predictions)
results = top_k(predictions, map_proto_path, label_path, k=5)
print("===== TENSORFLOW 结果 =======")
print("name".ljust(50)+"\t"+"score".ljust(20))
print("-"*65)
for name, score in results.items():
    print(f"{name.ljust(50)}\t{score:.5f}")
===== TENSORFLOW RESULTS =====
name                                              	score               
-----------------------------------------------------------------
African elephant, Loxodonta africana              	0.58394
tusker                                            	0.33909
Indian elephant, Elephas maximus                  	0.03186
banana                                            	0.00022
desk                                              	0.00019

Import the Graph into Relay#

Import the TensorFlow graph definition into the Relay frontend.

Results:

  • mod: a Relay module (IRModule) that expresses the given TensorFlow protobuf.

  • params: parameters converted from the TensorFlow weights (tensor protobuf).

Target device settings:

import tvm
from tvm import relay
# Use these commented-out settings instead to build for CUDA.
# target = tvm.target.Target("cuda", host="llvm")
# layout = "NCHW"
# dev = tvm.cuda(0)
target = tvm.target.Target("llvm", host="llvm")
layout = None
dev = tvm.cpu(0)
shape = 299, 299, 3
input_name = "DecodeJpeg/contents"
shape_dict = {input_name: shape}
dtype_dict = {input_name: "uint8"}
with tf_compat_v1.Session() as sess:
    # Create the graph from the saved GraphDef file.
    graph_def = create_graph(model_path)
    # Add shape attributes to the nodes of the graph.
    graph_def = tf_testing.AddShapesToGraphDef(sess, "softmax")
mod, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)
print("The TensorFlow protobuf has been imported into the Relay frontend.")
The TensorFlow protobuf has been imported into the Relay frontend.
/media/pc/data/lxw/ai/tvm/xinetzone/__pypackages__/3.10/lib/tvm/relay/frontend/tensorflow.py:537: UserWarning: Ignore the passed shape. Shape in graphdef will be used for operator DecodeJpeg/contents.
  warnings.warn(
/media/pc/data/lxw/ai/tvm/xinetzone/__pypackages__/3.10/lib/tvm/relay/frontend/tensorflow_ops.py:1036: UserWarning: DecodeJpeg: It's a pass through, please handle preprocessing before input
  warnings.warn("DecodeJpeg: It's a pass through, please handle preprocessing before input")

Relay Build#

Compile the graph to an LLVM target with the given input specification.

relay.build returns a graph executor factory module (assigned to lib below) that bundles:

  • the final computation graph after compilation,

  • the final parameters after compilation,

  • the target library, which can be deployed on the target together with the TVM runtime.

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target, params=params)
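
The factory module returned by relay.build can also be exported to disk and reloaded later, e.g. on the deployment side; a minimal sketch (the file name is illustrative):

from tvm.contrib import graph_executor

# Export the compiled model as a shared library (the file name is only an example).
lib.export_library("inception_v1_llvm.so")
# Reload it and create a graph executor module from the reloaded library.
loaded_lib = tvm.runtime.load_module("inception_v1_llvm.so")
reloaded_module = graph_executor.GraphModule(loaded_lib["default"](dev))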

Execute the Portable Graph on TVM#

Now we can deploy the compiled model on the target device and run it through the TVM graph executor.

from tvm.contrib import graph_executor
from PIL import Image

image = Image.open(image_path).resize((299, 299))
x = np.array(image)
dtype = "uint8"
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("DecodeJpeg/contents", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
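
If you want a rough idea of the inference latency on this target, the graph executor exposes a time evaluator; a minimal sketch (the number/repeat values are arbitrary):

# Time the "run" function of the graph executor; results are reported in seconds.
timer = m.module.time_evaluator("run", dev, number=1, repeat=10)
latencies_ms = np.array(timer().results) * 1000  # convert to milliseconds
print(f"Mean inference time: {latencies_ms.mean():.2f} ms (std {latencies_ms.std():.2f} ms)")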

Process the TVM Output#

Process the output of the InceptionV1 model into human-readable text.

predictions = tvm_output.numpy()
predictions = np.squeeze(predictions)
results = top_k(predictions, map_proto_path, label_path, k=5)
print("===== TVM 结果 =======")
print("name".ljust(50)+"\t"+"score".ljust(20))
print("-"*65)
for name, score in results.items():
    print(f"{name.ljust(50)}\t{score:.5f}")
===== TVM RESULTS =====
name                                              	score               
-----------------------------------------------------------------
African elephant, Loxodonta africana              	0.58335
tusker                                            	0.33901
Indian elephant, Elephas maximus                  	0.02391
banana                                            	0.00025
vault                                             	0.00021

Layout Conversion#

TensorFlow graphs use the NHWC data layout by default. The ConvertLayout pass below rewrites the layout-sensitive operators to NCHW, which is generally better supported by TVM's schedules, and the model is then rebuilt and re-run:

# print(mod["main"])
desired_layouts = {
    'image.resize2d': ['NCHW'],
    'nn.conv2d': ['NCHW', 'default'],
    'nn.max_pool2d': ['NCHW', 'default'],
    'nn.avg_pool2d': ['NCHW', 'default'],
}

# Convert the layout to NCHW.
# RemoveUnusedFunctions is used to clean up the graph.
seq = tvm.transform.Sequential([relay.transform.RemoveUnusedFunctions(),
                                relay.transform.ConvertLayout(desired_layouts)])
with tvm.transform.PassContext(opt_level=3):
    mod = seq(mod)
# print(mod["main"])
# Call Relay compilation.
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target, params=params)
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("DecodeJpeg/contents", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
predictions = tvm_output.numpy()
predictions = np.squeeze(predictions)
results = top_k(predictions, map_proto_path, label_path, k=5)
print("===== TVM 结果 =======")
print("name".ljust(50)+"\t"+"score".ljust(20))
print("-"*65)
for name, score in results.items():
    print(f"{name.ljust(50)}\t{score:.5f}")
===== TVM RESULTS =====
name                                              	score               
-----------------------------------------------------------------
African elephant, Loxodonta africana              	0.58335
tusker                                            	0.33901
Indian elephant, Elephas maximus                  	0.02391
banana                                            	0.00025
vault                                             	0.00021
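
As a sanity check, the TVM output can be compared numerically with the TensorFlow reference run. The sketch below assumes you kept a copy of the squeezed TensorFlow softmax output in a hypothetical variable tf_predictions before it was overwritten; the tolerances are illustrative, since small numerical differences between the two runtimes are expected:

# tf_predictions: hypothetical copy of the TensorFlow softmax output saved earlier.
# predictions:    the squeezed TVM output computed above.
assert int(np.argmax(tf_predictions)) == int(np.argmax(predictions)), "Top-1 class differs"
np.testing.assert_allclose(tf_predictions, predictions, rtol=1e-2, atol=1e-2)
print("TensorFlow and TVM outputs agree within tolerance.")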