TensorflowRunner
%cd ..
from pathlib import Path
temp_dir = Path(".temp")
temp_dir.mkdir(exist_ok=True)
/media/pc/data/lxw/ai/tvm-book/doc/tutorials/msc
Build the frontend model:
from tvm.contrib.msc.framework.tensorflow import tf_v1
import tvm
import tvm.testing  # provides tvm.testing.assert_allclose used below

def _get_tf_graph():
    """Get tensorflow graphdef"""
    # pylint: disable=import-outside-toplevel
    try:
        import tvm.relay.testing.tf as tf_testing

        tf_graph = tf_v1.Graph()
        with tf_graph.as_default():
            graph_def = tf_testing.get_workload(
                "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
                "mobilenet_v2_1.4_224_frozen.pb",
            )
            # Call the utility to import the graph definition into default graph.
            graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        return tf_graph, graph_def
    except:  # pylint: disable=bare-except
        print("please install tensorflow package")
        return None, None
2024-12-31 17:42:47.206712: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-12-31 17:42:47.504784: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-12-31 17:42:47.619618: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-12-31 17:42:48.380815: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-12-31 17:43:08.227733: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
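Build and run the model through TensorflowRunner, and compare its outputs against a golden result produced by running the original graph in a native TensorFlow session: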
import numpy as np
from tvm.contrib.msc.framework.tensorflow.frontend import from_tensorflow
from tvm.contrib.msc.framework.tensorflow.runtime import TensorflowRunner
from tvm.contrib.msc.core import utils as msc_utils

tf_graph, graph_def = _get_tf_graph()
if tf_graph and graph_def:
    path = f"{temp_dir}/test_runner_tf"
    workspace = msc_utils.set_workspace(msc_utils.msc_dir(path, keep_history=False))
    log_path = workspace.relpath("MSC_LOG", keep_history=False)
    msc_utils.set_global_logger("critical", log_path)
    data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    out_name = "MobilenetV2/Predictions/Reshape_1:0"
    # get golden
    with tf_v1.Session(graph=tf_graph) as sess:
        golden = sess.run([out_name], {"input:0": data})
    # get outputs
    shape_dict = {"input": data.shape}
    mod, _ = from_tensorflow(graph_def, shape_dict, [out_name], as_msc=False)
    runner = TensorflowRunner(mod)
    runner.build()
    outputs = runner.run([data], ret_type="list")
    workspace.destory()  # "destory" is the method's spelling in MSC utils
    for gol_r, out_r in zip(golden, outputs):
        tvm.testing.assert_allclose(gol_r, msc_utils.cast_array(out_r), atol=1e-3, rtol=1e-3)
WARNING:tensorflow:From /media/pc/data/lxw/ai/tvm/python/tvm/relay/testing/tf.py:282: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.gfile.GFile.
2024-12-31 17:43:35.660493: I tensorflow/core/common_runtime/gpu/gpu_device.cc:2021] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21128 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090, pci bus id: 0000:03:00.0, compute capability: 8.6
2024-12-31 17:43:35.661150: I tensorflow/core/common_runtime/gpu/gpu_device.cc:2021] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 9796 MB memory: -> device: 1, name: NVIDIA GeForce RTX 2080 Ti, pci bus id: 0000:81:00.0, compute capability: 7.5
2024-12-31 17:43:35.750451: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled
2024-12-31 17:43:37.934376: I external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:531] Loaded cuDNN version 8907
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
W0000 00:00:1735638218.239880 29375 gpu_timer.cc:114] Skipping the delay kernel, measurement accuracy will be reduced
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[3], line 22
20 mod, _ = from_tensorflow(graph_def, shape_dict, [out_name], as_msc=False)
21 runner = TensorflowRunner(mod)
---> 22 runner.build()
23 outputs = runner.run([data], ret_type="list")
24 workspace.destory()
File /media/pc/data/lxw/ai/tvm/python/tvm/contrib/msc/core/runtime/runner.py:249, in BaseRunner.build(self, cache_dir, force_build, disable_tools)
246 else:
247 # Generate normal model
248 self._graphs, self._weights = self.reset_tools(tools=tools, cache_dir=cache_dir)
--> 249 self._model = self.generate_model()
250 build_msg += "Generate "
252 # Add tool message
File /media/pc/data/lxw/ai/tvm/python/tvm/contrib/msc/core/runtime/runner.py:441, in BaseRunner.generate_model(self, apply_hooks)
439 for hook in self._generate_config.get("pre_hooks", []):
440 graphs, weights = self._apply_hook("before generate", hook, graphs, weights)
--> 441 model = self._generate_model(graphs, weights)
442 if apply_hooks:
443 for hook in self._generate_config.get("post_hooks", []):
File /media/pc/data/lxw/ai/tvm/python/tvm/contrib/msc/framework/tensorflow/runtime/runner.py:112, in TensorflowRunner._generate_model(self, graphs, weights)
110 self._tf_graph = tf_v1.Graph()
111 with self._tf_graph.as_default():
--> 112 self._tf_outputs = super()._generate_model(graphs, weights)
113 return self._tf_graph
File /media/pc/data/lxw/ai/tvm/python/tvm/contrib/msc/core/runtime/runner.py:1229, in ModelRunner._generate_model(self, graphs, weights)
1213 def _generate_model(self, graphs: List[MSCGraph], weights: Dict[str, tvm.nd.array]) -> Any:
1214 """Codegen the model according to framework
1215
1216 Parameters
(...)
1226 The runnable model
1227 """
-> 1229 return self.codegen_func(
1230 graphs[0],
1231 weights,
1232 codegen_config=self._generate_config.get("codegen"),
1233 print_config=self._generate_config.get("print"),
1234 build_folder=self._generate_config["build_folder"],
1235 plugin=self._plugin,
1236 )
File /media/pc/data/lxw/ai/tvm/python/tvm/contrib/msc/framework/tensorflow/codegen/codegen.py:72, in to_tensorflow(graph, weights, codegen_config, print_config, build_folder, plugin)
70 if plugin:
71 model_args = model_args + [plugin]
---> 72 return codegen.load(model_args, pre_load=_save_weights)
File /media/pc/data/lxw/ai/tvm/python/tvm/contrib/msc/core/codegen/codegen.py:118, in CodeGen.load(self, inputs, pre_load, post_load, build_model)
116 elif self._code_format == "python":
117 builder = msc_utils.load_callable(self._graph.name + ".py:" + self._graph.name)
--> 118 obj = builder(*inputs)
119 else:
120 raise NotImplementedError(
121 "Code format {} is not supported".format(self._code_format)
122 )
File main.py:127, in main(res_0, weights)
File /media/pc/data/lxw/envs/anaconda3x/envs/xxx/lib/python3.12/site-packages/tensorflow/python/util/lazy_loader.py:207, in KerasLazyLoader.__getattr__(self, item)
200 raise AttributeError(
201 "`tf.compat.v2.keras` is not available with Keras 3. Just use "
202 "`import keras` instead."
203 )
204 elif self._tfll_submodule and self._tfll_submodule.startswith(
205 "__internal__.legacy."
206 ):
--> 207 raise AttributeError(
208 f"`{item}` is not available with Keras 3."
209 )
210 module = self._load()
211 return getattr(module, item)
AttributeError: `batch_normalization` is not available with Keras 3.
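The build fails because the TensorFlow code generated by MSC uses legacy `tf.compat.v1` layer APIs such as `batch_normalization`, which are no longer available once TensorFlow ships Keras 3 as its default (TensorFlow 2.16 and later). A minimal sketch of one possible workaround, assuming the `tf-keras` compatibility package is installed, is to switch TensorFlow back to the legacy Keras 2 implementation before TensorFlow is imported anywhere:

# Sketch: fall back to legacy Keras 2 so the tf.compat.v1 layer APIs stay available.
# Assumes `pip install tf-keras`; the flag must be set before tensorflow is imported,
# e.g. at the very top of the notebook or in the environment that launches it.
import os

os.environ["TF_USE_LEGACY_KERAS"] = "1"

import tensorflow as tf  # import only after the flag is set

Alternatively, running the example against an older TensorFlow release that still bundles Keras 2 avoids the problem without any extra packages.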