Posted to commits@tvm.apache.org by jw...@apache.org on 2021/04/09 19:17:26 UTC

[tvm] 05/09: [AMD:ONNXRT:TVM] Demonstrate output shape extraction from module and enable the application of tuning logs to the module.

This is an automated email from the ASF dual-hosted git repository.

jwfromm pushed a commit to branch checkpoint
in repository https://gitbox.apache.org/repos/asf/tvm.git

commit 0679ad4e927bf644bb8499ee4f0f5c3d32707d52
Author: Chris Sullivan <cs...@octoml.ai>
AuthorDate: Thu Sep 3 23:16:49 2020 -0700

    [AMD:ONNXRT:TVM] Demonstrate output shape extraction from module
    and enable the application of tuning logs to the module.
---
 include/tvm/driver/jit_interface.h    |  4 +++-
 python/tvm/relay/frontend/jit/onnx.py | 25 ++++++++++++++++++++++++-
 src/driver/driver_api.cc              | 15 +++++++++++++--
 3 files changed, 40 insertions(+), 4 deletions(-)
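
For context on the features named in the commit message, both can be exercised end to end
with stock TVM Python APIs. A minimal sketch, assuming a build of this branch (where
relay.build still returns a (graph, lib, params) tuple); the model path, input name,
shapes, and target are placeholders, and AUTOTVM_TUNING_LOG follows the convention
introduced in python/tvm/relay/frontend/jit/onnx.py below:

    import os
    import onnx
    import tvm
    import tvm.relay as relay
    from tvm import autotvm
    from tvm.contrib import graph_runtime

    # Import a model and describe its inputs (placeholder file name and shapes).
    model = onnx.load("model.onnx")
    shape_dict = {"input": [1, 3, 224, 224]}
    irmod, params = relay.frontend.from_onnx(model, shape_dict, opset=11)

    # Apply tuning records when AUTOTVM_TUNING_LOG points at a log file,
    # mirroring the branch taken in onnx_compile below.
    log = os.getenv("AUTOTVM_TUNING_LOG")
    with relay.build_config(opt_level=3):
        if log:
            with autotvm.apply_history_best(log):
                graph, lib, params = relay.build(irmod, target="llvm", params=params)
        else:
            graph, lib, params = relay.build(irmod, target="llvm", params=params)

    # Counterpart of the new TVMExtractOutputShapes entry point: ask the graph
    # runtime for each output's shape after a run.
    m = graph_runtime.create(graph, lib, tvm.cpu(0))
    m.set_input(**params)
    m.run()
    print([m.get_output(i).shape for i in range(m.get_num_outputs())])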

diff --git a/include/tvm/driver/jit_interface.h b/include/tvm/driver/jit_interface.h
index e9203ee..47b98c9 100644
--- a/include/tvm/driver/jit_interface.h
+++ b/include/tvm/driver/jit_interface.h
@@ -3,7 +3,9 @@
 #ifdef __cplusplus
 extern "C" {
     EXPORT_DLL tvm::runtime::Module TVMCompile(const std::string& onnx_txt, const std::string& target, const std::string& target_host, int opt_level, const std::vector<std::vector<int64_t>>& input_shapes);
-    EXPORT_DLL void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor>& inputs, std::vector<DLTensor>& outputs, tvm::runtime::TVMRetValue* ret);
+    EXPORT_DLL void TVMExtractOutputShapes(tvm::runtime::Module& mod, size_t num_outputs, std::vector<std::vector<int64_t>>& output_shapes);
+
+    EXPORT_DLL void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor>& inputs, std::vector<DLTensor>& outputs);
     
     
 }  // TVM_EXTERN_C
diff --git a/python/tvm/relay/frontend/jit/onnx.py b/python/tvm/relay/frontend/jit/onnx.py
index 3672bbe..0f2a79c 100644
--- a/python/tvm/relay/frontend/jit/onnx.py
+++ b/python/tvm/relay/frontend/jit/onnx.py
@@ -14,9 +14,27 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+import os
 import onnx
 import tvm
 import tvm.relay
+import tvm.autotvm as autotvm
+import timeit
+import numpy as np
+
+@tvm.register_func("tvm_run_with_benchmark")
+def run_with_benchmark(mod):
+    run = mod.get_function('run')
+    def benchmark(name):
+        t = timeit.Timer(lambda: run()).repeat(repeat=5, number=5)
+        ts = np.array(t) * 1000
+        print("{} benchmark results: {:.2f}ms mean, {:.2f}ms median, {:.2f}ms std".format(
+            name, np.mean(ts), np.median(ts), np.std(ts)
+        ))
+    if os.getenv("AUTOTVM_TUNING_LOG"):
+        benchmark("Tuned")
+    else:
+        benchmark("Baseline")
 
 @tvm.register_func("tvm_onnx_import_and_compile")
 def onnx_compile(model_string, target, target_host, opt_level, input_shapes):
@@ -26,7 +44,12 @@ def onnx_compile(model_string, target, target_host, opt_level, input_shapes):
 
     irmod, params = tvm.relay.frontend.from_onnx(model, input_shapes, opset=11)
     with tvm.relay.build_config(opt_level=opt_level):
-        graph, lib, params = tvm.relay.build(irmod, target_host=target_host, target=target, params=params)
+        tuning_logfile = os.getenv("AUTOTVM_TUNING_LOG")
+        if tuning_logfile:
+            with autotvm.apply_history_best(tuning_logfile):
+                graph, lib, params = tvm.relay.build(irmod, target_host=target_host, target=target, params=params)
+        else:
+            graph, lib, params = tvm.relay.build(irmod, target_host=target_host, target=target, params=params)
 
     ctx = tvm.context(target, 0)
     m = tvm.contrib.graph_runtime.create(graph, lib, ctx)
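
The log consumed by apply_history_best above has to be produced separately. A minimal
sketch of collecting it with AutoTVM's standard tuning loop, assuming the same
from_onnx-derived module as in onnx_compile; the model, tuner choice, trial count, and
log file name are placeholders:

    import onnx
    import tvm
    import tvm.relay as relay
    from tvm import autotvm

    model = onnx.load("model.onnx")  # placeholder model
    irmod, params = relay.frontend.from_onnx(model, {"input": [1, 3, 224, 224]}, opset=11)

    # Extract tunable tasks from the Relay program.
    tasks = autotvm.task.extract_from_program(irmod["main"], target="llvm", params=params)
    measure = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(number=10, repeat=1),
    )
    for task in tasks:
        tuner = autotvm.tuner.XGBTuner(task)
        tuner.tune(
            n_trial=min(1000, len(task.config_space)),
            measure_option=measure,
            # Records accumulate in the file AUTOTVM_TUNING_LOG should later point at.
            callbacks=[autotvm.callback.log_to_file("tuning.log")],
        )

With the file in place, exporting AUTOTVM_TUNING_LOG=tuning.log makes both the build path
and the benchmark label in run_with_benchmark switch to the tuned behavior.
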
diff --git a/src/driver/driver_api.cc b/src/driver/driver_api.cc
index 4399855..9172283 100644
--- a/src/driver/driver_api.cc
+++ b/src/driver/driver_api.cc
@@ -346,7 +346,17 @@ tvm::runtime::Module TVMCompile(const std::string& onnx_txt, const std::string&
   return mod;
 }
 
-void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor>& inputs, std::vector<DLTensor>& outputs, tvm::runtime::TVMRetValue* ret)
+void TVMExtractOutputShapes(tvm::runtime::Module& mod, size_t num_outputs, std::vector<std::vector<int64_t>>& output_shapes)
+{
+  tvm::PackedFunc get_output = mod.GetFunction("get_output", false);
+  for (size_t i = 0; i < num_outputs; i++)
+  {
+    tvm::runtime::NDArray output_array = get_output(i);
+    output_shapes.push_back(output_array.Shape());
+  }
+}
+
+void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor>& inputs, std::vector<DLTensor>& outputs)
 {
   tvm::PackedFunc set_input = mod.GetFunction("set_input_zero_copy", false);
   for (size_t i = 0; i < inputs.size(); i++)
@@ -354,7 +364,8 @@ void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor>& inputs, std::vecto
     set_input(i, &inputs[i]);
   }
 
-  mod.GetFunction("run", false)();
+  const tvm::PackedFunc* run = tvm::runtime::Registry::Get("tvm_run_with_benchmark");
+  (*run)(mod);
 
   tvm::PackedFunc get_output = mod.GetFunction("get_output", false);
   for (size_t i = 0; i < outputs.size(); i++)
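
Note that TVMRun now reaches the Python-side benchmark hook through the global PackedFunc
registry (Registry::Get above) instead of calling the module's own "run" function
directly; this assumes the onnx.py frontend has been imported so that
"tvm_run_with_benchmark" is registered, otherwise Registry::Get returns nullptr. A
minimal sketch of that registry round-trip from Python, with a placeholder function name:

    import tvm

    # Same mechanism as @tvm.register_func("tvm_run_with_benchmark") in onnx.py.
    @tvm.register_func("demo.echo")
    def _echo(x):
        return x

    # What Registry::Get("demo.echo") would resolve to on the C++ side.
    f = tvm.get_global_func("demo.echo")
    assert f(41) == 41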